How do i record and invert a sound wave in real time using Tkinter, tkSnack and wave modules?
Hi,
I want to record a sound wave from a mic and at the same time invert
it and play the inverted wave. My code goes as follows, however nothing
is written into the E:\inverted.wav file. Thanks in advance for any
help.
from Tkinter import *
root = Tk()
import tkSnack
tkSnack.initializeSnack(root)
t=tkSnack
import wave
import audioop
a=audioop
w=wave
import threading
import os
def fin():
    """Start recording from the microphone into E:/final.wav via tkSnack."""
    global s  # stp() must be able to stop this Sound; it was local before
    s = t.Sound(file='E:/final.wav')
    s.frequency = 44100
    s.record()  # Start recording (asynchronous; returns immediately)

def invert():
    """Read final.wav frame by frame, invert each sample, write and play it.

    Runs 5 seconds after recording starts, while final.wav is still being
    written.  NOTE(review): reading a file that is concurrently being
    recorded is unreliable and is the likely reason inverted.wav stays
    empty -- confirm that tkSnack flushes data to disk while recording.
    """
    # 'as' is a reserved word in Python and cannot be used as a name;
    # the original "global f,i,as" was a SyntaxError.
    global f, i, inv_snd
    f = w.open("E:\\final.wav", "r")
    frame = f.readframes(1)
    i = w.open("E:\\inverted.wav", "w")
    # Output parameters must be set before any frames are written.
    i.setsampwidth(2)        # 16-bit samples
    i.setnframes(30 * 5)
    i.setnchannels(1)        # mono
    i.setframerate(44100)
    f.setpos(220500)         # skip the first 5 seconds (5 * 44100 frames)
    c = 0
    while frame:
        # Invert the sample by multiplying by -1 and append it raw.
        i.writeframesraw(audioop.mul(frame, 2, -1.0))
        c = c + 1
        frame = f.readframes(1)
        if c == 100:
            # After 100 frames, start playing what has been inverted so far.
            inv_snd = t.Sound(load='E:/inverted.wav')
            inv_snd.play()

def stp():
    """Stop recording/playback and close both wave files."""
    s.stop()
    inv_snd.stop()
    # Closing the writer flushes the wave header; until then the file on
    # disk looks empty.
    f.close()
    i.close()

k = threading.Timer(30., stp)     # Stop everything after 30 sec
k.start()
j = threading.Timer(5.0, invert)  # Start reading the recorded file after 5 sec
j.start()
fin()
--
http://mail.python.org/mailman/listinfo/python-list
RE: Replace stop words (remove words from a string)
How about - for s in stoplist: string.replace(mystr, s, "") Hope this should work. -Original Message- From: [EMAIL PROTECTED] [mailto:[EMAIL PROTECTED] On Behalf Of BerlinBrown Sent: Thursday, January 17, 2008 1:55 PM To: [email protected] Subject: Replace stop words (remove words from a string) if I have an array of "stop" words, and I want to replace those values with something else; in a string, how would I go about doing this. I have this code that splits the string and then does a difference but I think there is an easier approach: E.g. mystr = kljsldkfjksjdfjsdjflkdjslkf[BAD]Kkjjkkjk[BAD]LSKJFKSFJKSJF;L[BAD2]kjsldf sd; if I have an array stop_list = [ "[BAD]", "[BAD2]" ] I want to replace the values in that list with a zero length string. I had this before, but I don't want to use this approach; I don't want to use the split. line_list = line.lower().split() res = list(set(keywords_list).difference(set(ENTITY_IGNORE_LIST))) -- http://mail.python.org/mailman/listinfo/python-list -- http://mail.python.org/mailman/listinfo/python-list
SSL Server side Client Certficate Verification in M2Crypto
Hi,
I have a small problem using the M2Crypto for SSL certificate verification.
I have a client and a server who wants to get the certificates verified by
the other in order start the communication. I am able to get the server
certificate verified by the client but not the client certificate in the
server.
I have attached the code which I use for this. Kindly tell me where I had
gone wrong.
I would appreciate a quick reply since I have not been able to make progress
in my project due to this problem.
I am using Python 2.6.1 version in Ubuntu 8.10. I have the OpenSSL version
0.9.8 and SWIG 1.33.
The M2Crypto I am using is 0.18.
I am also using my own CA to sign the certificates. The CA certificates are
available with both the server and the client.
Please let me know if you require additional information on this.
Thanks
Karthik
import select
import socket
import sys
import string
import M2Crypto
HOST = "127.0.0.1"
PORT = 5050
BACKLOG = 5
BUFF_SIZE = 1024
from M2Crypto import SSL
class client:
def run(self):
con = SSL.Context('tlsv1')
#con.load_verify_locations('cacert.pem','/home/kchandr1/Desktop/sc/')
##con.load_verify_locations('cacert.pem')
#con.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert, depth = 9)
con.load_client_ca('cacert.pem')
con.load_cert(certfile = "client_crt.pem",keyfile = "client_key.pem")
con.set_client_CA_list_from_file('cacert.pem')
c= SSL.Connection(con)
c.connect((HOST,5050))
if c.get_peer_cert() is not None:
print "Server Certificate verified"
print c.get_verify_result()
print c.get_peer_cert()
con.load_client_ca('cacert.pem')
con.load_cert(certfile = "client_crt.pem",keyfile = "client_key.pem")
else:
print "CLIENT: Not able to get certificate"
sys.exit()
data = raw_input("Enter")
while data:
c.send(data)
data = raw_input("Enter to pass to server")
c.close()
if __name__ == "__main__":
client1 = client()
try:
client1.run()
except KeyboardInterrupt:
print "Keyboard Interrupt recieved"
s.close_socket()
import select
import socket
import sys
import string
HOST = "127.0.0.1"
PORT = 5050
BACKLOG = 5
BUFF_SIZE = 1024
from M2Crypto import SSL
class server:
def run(self):
con = SSL.Context('tlsv1')
con.load_client_ca('cacert.pem')
con.load_cert(certfile = "server_crt.pem",keyfile = "server_key.pem")
con.load_verify_locations('cacert.pem')
#con.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert, depth = 9)
bindsocket = SSL.Connection(con)
bindsocket.bind((HOST,PORT))
bindsocket.listen(BACKLOG)
print "waiting for connection"
(connectsocket, fromaddress) = bindsocket.accept()
c= SSL.Connection(con)
if c.get_peer_cert() is not None:
print "Client Certificate verified"
print c.get_verify_result()
else:
print "Server: Not able to get certificate"
print c.get_verify_result()
print c.get_peer_cert()
sys.exit()
data = connectsocket.read()
while data:
print data
data = connectsocket.read()
connectsocket.write('200 OK\r\n\r\n')
connectsocket.close()
bindsocket.close()
if __name__ == "__main__":
s = server()
try:
s.run()
except KeyboardInterrupt:
print "Keyboard Interrupt recieved"
s.close_socket()
--
http://mail.python.org/mailman/listinfo/python-list
Setting default version among multiple python installations
Hello, I am an absolute linux and python newbie. The linux machine(red hat version 7.2) that i managed to get my hands on had python 1.5(vintage stuff, i guess) in it. I have installed python 2.5 using the source tar. However, when i try to access python, i am taken to the older version only. How do i set python2.5 as the default version? -Karthik -- http://mail.python.org/mailman/listinfo/python-list
Re: Setting default version among multiple python installations
Gabriel Genellina wrote: En Tue, 08 Apr 2008 06:48:54 -0300, Karthik <[EMAIL PROTECTED]> escribió: I am an absolute linux and python newbie. The linux machine(red hat version 7.2) that i managed to get my hands on had python 1.5(vintage stuff, i guess) in it. I have installed python 2.5 using the source tar. However, when i try to access python, i am taken to the older version only. Have you actually compiled and installed it? See http://docs.python.org/dev/using/index.html Yes, i got the source from http://python.org/ftp/python/2.5.2/Python-2.5.2.tgz if i type python2.5 i am able to use the latest python, but if i simply type python it taken me to the older version. (it is a minor annoyance, but I want to know how to fix it) -Karthik -- http://mail.python.org/mailman/listinfo/python-list
Re: Setting default version among multiple python installations
Gabriel Genellina wrote: En Wed, 09 Apr 2008 02:45:28 -0300, Karthik <[EMAIL PROTECTED]> escribió: if i type python2.5 i am able to use the latest python, but if i simply type python it taken me to the older version. (it is a minor annoyance, but I want to know how to fix it) From the README file: There's an executable /usr/bin/python which is Python 1.5.2 on most older Red Hat installations; several key Red Hat tools require this version. Python 2.1.x may be installed as /usr/bin/python2. The Makefile installs Python as /usr/local/bin/python, which may or may not take precedence over /usr/bin/python, depending on how you have set up $PATH. yes, that seems to be the case. i changed the order of entries in $PATH, and now things are working now. thanks for your help. (Lesson learnt: if it says README, better read it completely, dont stop at "Congratulations on getting this far. :-)" ) -Karthik -- http://mail.python.org/mailman/listinfo/python-list
Re: [Python-Dev] annoying dictionary problem, non-existing keys
bvidinli wrote: I posted to so many lists because, this issue is related to all lists, this is an idea for python, this is related to development of python... why are you so much defensive ? i think ideas all important for development of python, software i am sory anyway hope will be helpful. 2008/4/24, Terry Reedy <[EMAIL PROTECTED]>: Python-dev is for discussion of development of future Python. Use python-list / comp.lang.python / gmane.comp.python.general for usage questions. ___ Python-Dev mailing list [EMAIL PROTECTED] http://mail.python.org/mailman/listinfo/python-dev Unsubscribe: http://mail.python.org/mailman/options/python-dev/bvidinli%40gmail.com Is this an acceptable alternative? try: if conf['key1'] == 'something': except KeyError: pass Regards, Karthik -- http://mail.python.org/mailman/listinfo/python-list
Packing a ctypes struct containing bitfields.
Hello Everybody,
I'm trying to create a packed structure in ctypes (with one 64-bit
element that is bitfielded to 48 bits),
unsuccessfully:
===
from ctypes import *
# Minimal repro: a "packed" ctypes struct whose only field is a 48-bit
# bitfield of a 64-bit base type.
# NOTE(review): ctypes allocates bitfields in units of the declared base
# type, so the c_ulonglong storage unit occupies a full 8 bytes even with
# _pack_ = 1 -- this prints 8, not the hoped-for 6 (matching the thread's
# observation).
class foo (Structure):
_pack_ = 1
_fields_ = [
("bar",c_ulonglong, 48),
]
print("sizeof(foo) = %d" % sizeof(foo))
===
I'm expecting that this should print 6 - however, on my box, it prints
8.
The following piece of C code, when compiled and run, prints 6, which
is correct.
===
/* C counterpart of the ctypes struct: one 48-bit bitfield in an
 * unsigned long long.
 * NOTE(review): as pointed out later in the thread, gcc pads this to
 * 8 bytes unless the struct carries __attribute__((packed)); the "6"
 * claimed here presumably came from a packed build -- confirm. */
struct foo {
unsigned long long bar: 48;
};
printf("sizeof(foo) = %d", sizeof(foo));
===
So... what am I doing wrong?
Thanks,
Karthik.
--
http://mail.python.org/mailman/listinfo/python-list
Re: Packing a ctypes struct containing bitfields.
On Jun 18, 6:29 pm, Nick Craig-Wood wrote:
> Karthik wrote:
> > Hello Everybody,
>
> > I'm trying to create a packed structure in ctypes (with one 64-bit
> > element that is bitfielded to 48 bits),
> > unsuccessfully:
>
> > ===
> > from ctypes import *
>
> > class foo (Structure):
> > _pack_ = 1
> > _fields_ = [
> > ("bar", c_ulonglong, 48),
> > ]
>
> > print("sizeof(foo) = %d" % sizeof(foo))
> > ===
> > I'm expecting that this should print 6 - however, on my box, it prints
> > 8.
>
> > The following piece of C code, when compiled and run, prints 6, which
> > is correct.
> > ===
> > struct foo {
> > unsigned long long bar: 48;
> > };
>
> > printf("sizeof(foo) = %d", sizeof(foo));
> > ===
>
> > So... what am I doing wrong?
>
> I compiled and ran the above with gcc on my linux box - it prints 8
> unless I add __attribute__((__packed__)) to the struct.
>
> I'm not sure that using bitfields like that is a portable at all
> between compilers let alone architectures.
>
> I'd probably do
>
> from ctypes import *
>
> class foo (Structure):
> _pack_ = 1
> _fields_ = [
> ("bar0", c_uint32),
> ("bar1", c_uint16),
> ]
> def set_bar(self, bar):
> self.bar0 = bar & 0xFFFFFFFF
> self.bar1 = (bar >> 32) & 0xFFFF
> def get_bar(self):
> return (self.bar1 << 32) + self.bar0
> bar = property(get_bar, set_bar)
>
> print "sizeof(foo) = %d" % sizeof(foo)
> f = foo()
> print f.bar
> f.bar = 123456789012345
> print f.bar
>
> Which prints
>
> sizeof(foo) = 6
> 0
> 123456789012345
>
> --
> Nick Craig-Wood --http://www.craig-wood.com/nick
Oops, sorry about the missing __attribute__((packed)) - that was an
error of
omission.
Thank you, Nick :)
I'm looking at some C code that's using bitfields in structs, and I'm
writing
python code that "imports" these structs and messes around with them -
unfortunately,
I'm not at liberty to change the C code to not use bitfields.
Your solution works fine, so that's what I'm going to use.
Thanks,
Karthik.
--
http://mail.python.org/mailman/listinfo/python-list
How to parse the starting and ending of a loop statements in python
My objective is to find the line numbers of the start and the end of a loop statement in python. Example scenario #A.py Line1: a=0 Line2: while a<5: Line3:print a Line4:a=a+1 Desired output: Start of a loop Line2 End of a loop Line4 Current parser code #parser.py with open(a) as f: tree = ast.parse(f.read()) taskline=[] for node in ast.walk(tree): if isinstance(node, (ast.For)) or isinstance(node,(ast.While)): print node.lineno-1 <-- This give line number on for the start of a loop I wanted to achieve the above output. I use AST to parse a given file and determine the occurrence of loops. With AST parsing i am able to find line number for the start of the loop but the line number for ending of the loop is yet to be determined. Is there any way i could parse an entire loop statement and determine its starting and ending line number ? -- http://mail.python.org/mailman/listinfo/python-list
How to implement semaphores in python?
Hello All, I need some help with semaphore implementation between two programs in python. I'd be glad if anyone can give me some help. -- https://mail.python.org/mailman/listinfo/python-list
Transfer a file to httpserver via POST command from curl
I have written a python server app (actually copied from somewhere) using
Flask, that I want to act as a http server. I expect this server to recieve
a POST command (of a file) from CURL and save that file on the server. And
I should be able to download that file when required.
My python scripts are shown below.
app.py
from flask import Flask
# Destination directory for uploaded files.  The original value
# 'home/user/uploads' had no leading slash, so it resolved *relative to
# the process working directory* -- almost certainly a typo.
UPLOAD_FOLDER = '/home/user/uploads'
app = Flask(__name__)
#app.secret_key = "secret key"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Reject request bodies larger than 16 MiB (Flask returns 413).
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
main.py
import os
import urllib.request
from app import app
from flask import Flask, request, redirect, jsonify
from werkzeug.utils import secure_filename
# Whitelist of upload extensions (compared case-insensitively).
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])

def allowed_file(filename):
    """Return True if *filename* has an extension we accept for upload.

    A name with no dot at all is rejected; only the text after the last
    dot is considered, lower-cased.
    """
    # The archived line wrap split this expression across lines without
    # parentheses, which is a SyntaxError -- parenthesized here.
    return ('.' in filename
            and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS)
@app.route('/file-upload', methods=['POST'])
def upload_file():
    """Accept a multipart/form-data upload under the form field 'file'.

    Returns 400 when the request has no 'file' part, no selected
    filename, or a disallowed extension; 201 on success.

    NOTE(review): a raw-body POST (curl --data-binary @file) carries no
    multipart 'file' part at all, so it hits the first 400 below -- the
    client must use `curl -F 'file=@...'` instead.
    """
    if 'file' not in request.files:
        resp = jsonify({'message': 'No file part in the request'})
        resp.status_code = 400
        return resp
    file = request.files['file']
    if file.filename == '':
        resp = jsonify({'message': 'No file selected for uploading'})
        resp.status_code = 400
        return resp
    if file and allowed_file(file.filename):
        # secure_filename strips path separators and other dangerous
        # characters before the name touches the filesystem.
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        resp = jsonify({'message': 'File successfully uploaded'})
        resp.status_code = 201
        return resp
    else:
        # The archived line wrap split this string literal across lines
        # (a SyntaxError); rejoined here.
        resp = jsonify({'message': 'Allowed file types are txt, pdf, png, jpg, jpeg, gif'})
        resp.status_code = 400
        return resp
# Run the Flask development server (defaults to 127.0.0.1:5000).
if __name__ == "__main__":
app.run()
I am doing a POST using curl as follows.
curl -X POST --data-binary @/home/user/testfile.txt http://
127.0.0.1:5000/file-upload
Is it really possible to transfer a large binary file from my machine to
the above httpserver via POST command and download it again? If yes, is the
above Flask app enough for that and what am I doing wrong?
Kindly Reply,
Regards,
Karthik.
I am getting 400 Bad Request error from the server. I am new to Flask.
What am I doing wrong. Is it possible to transfer a binary file
(large) from my computer to the above http server via POST command.?
For that purpose is my above code enought? What am I doing wrong?
--
https://mail.python.org/mailman/listinfo/python-list
Implementing CURL command using libcurl in C/C++
The `CURL` command that I am using is shown below.
curl -F 'file=@/home/karthik/Workspace/downloadfile.out'
http://127.0.0.1:5000/file-upload --verbose
The response from the server is shown below.
* Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 5000 (#0)
> POST /file-upload HTTP/1.1
> Host: 127.0.0.1:5000
> User-Agent: curl/7.47.0
> Accept: */*
> Content-Length: 663876790
> Expect: 100-continue
> Content-Type: multipart/form-data;
boundary=4e96ef0714498bd7
>
< HTTP/1.1 100 Continue
* HTTP 1.0, assume close after body
< HTTP/1.0 201 CREATED
< Content-Type: application/json
< Content-Length: 46
< Server: Werkzeug/0.16.0 Python/3.5.2
< Date: Sat, 14 Dec 2019 07:05:15 GMT
<
{
"message": "File successfully uploaded"
}
* Closing connection 0
I want to implement the same command in C/C++ using libcurl. I am using the
following function.
int FileUploadDownload::upload(const std::string &filename, const
std::string &url) {
CURL *curl;
CURLcode res;
struct stat file_info;
curl_off_t speed_upload, total_time;
FILE *fd;
fd = fopen(filename.c_str(), "rb");
if(!fd) {
m_logger->errorf("unable to open file: %s\n",strerror(errno));
return 1;
}
if(fstat(fileno(fd), &file_info) != 0) {
m_logger->errorf("unable to get file stats: %s\n",strerror(errno));
return 2;
}
std::cout << "filename : "<< filename << std::endl;
std::cout << "url : " << url << std::endl;
curl = curl_easy_init();
if(curl) {
curl_easy_setopt(curl, CURLOPT_URL,
url.c_str());
curl_easy_setopt(curl, CURLOPT_POSTFIELDS, filename.c_str());
curl_easy_setopt(curl, CURLOPT_POST, 1L);
curl_easy_setopt(curl, CURLOPT_READDATA, fd);
curl_easy_setopt(curl, CURLOPT_INFILESIZE_LARGE,
(curl_off_t) file_info.st_size);
curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);
res = curl_easy_perform(curl);
if (res != CURLE_OK) {
m_logger->errorf("curl_easy_perform() failed:
%s\n",curl_easy_strerror(res));
} else {
curl_easy_getinfo(curl, CURLINFO_SPEED_UPLOAD, &speed_upload);
curl_easy_getinfo(curl, CURLINFO_TOTAL_TIME, &total_time);
m_logger->infof("Speed: %" CURL_FORMAT_CURL_OFF_T " bytes/sec
during %"
CURL_FORMAT_CURL_OFF_T ".%06ld seconds\n",
speed_upload,
(total_time / 100), (long) (total_time %
100));
}
}
return 0;
}
The below is the result that I get from the server.
The result that I get is shown below.
* Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 5000 (#0)
> POST /file-upload HTTP/1.1
> Host: 127.0.0.1:5000
> User-Agent: curl/7.47.0
> Accept: */*
> Content-Length: 550
> Expect: 100-continue
> Content-Type: multipart/form-data;
boundary=c8ef4837136fca99
>
< HTTP/1.1 100 Continue
* HTTP 1.0, assume close after body
< HTTP/1.0 201 CREATED
< Content-Type: application/json
< Content-Length: 46
< Server: Werkzeug/0.16.0 Python/3.5.2
< Date: Sat, 14 Dec 2019 07:09:47 GMT
<
{
"message": "File successfully uploaded"
}
* Closing connection 0
My aim is to mimic the curl command above in the C/C++ code below. What am I
doing wrong ?
--
https://mail.python.org/mailman/listinfo/python-list
select.select()
please help me.. what does the following line do? read_sockets,write_sockets,error_sockets = select.select(CONNECTION_LIST,[],[]) -- https://mail.python.org/mailman/listinfo/python-list
Re: select.select()
On Friday, 22 November 2013 18:29:12 UTC-8, Steven D'Aprano wrote: > On Fri, 22 Nov 2013 17:42:07 -0800, Bhanu Karthik wrote: > > > > > please help me.. what does the following line do? > > > > > > read_sockets,write_sockets,error_sockets = > > > select.select(CONNECTION_LIST,[],[]) > > > > The select.select function takes three arguments (plus an optional > > fourth): > > > > select.select(read_list, write_list, exception_list) > > > > Each list should a list of the file descriptors you want to wait for. On > > Windows, only sockets are valid file descriptors. On Unix or Linux, you > > can use sockets, open file objects, or low-level file descriptors. > > > > In this case, you only pass CONNECTION_LIST, the others are empty lists > > []. CONNECTION_LIST is probably a list of sockets to be read. When they > > are ready for reading, select() will return three lists: > > > > read_sockets - a list of the sockets open for reading > > > > write_sockets and error_sockets should both be empty lists, since you > > didn't request any of those to be opened. > > > > > > > > -- > > Steven Thank you ,your message answered the question exactly. instead of using select.select,can we do like below? read_sockets=connection_list write_sockets=[] error_sockets=[] -- https://mail.python.org/mailman/listinfo/python-list
Re: select.select()
On Friday, 22 November 2013 18:15:10 UTC-8, Roy Smith wrote:
> In article ,
>
> Bhanu Karthik wrote:
>
>
>
> > please help me.. what does the following line do?
>
> >
>
> > read_sockets,write_sockets,error_sockets =
>
> > select.select(CONNECTION_LIST,[],[])
>
>
>
> This is a little tricky.
>
>
>
> First,read the docs at http://docs.python.org/2/library/select.html.
>
> There's a lot of complicated stuff there, but just concentrate on the
>
> description of the select.select() call for now.
>
>
>
> Imagine a process which has a lot of network connections open. A great
>
> example would be something like a MUD (Multi User Dungeon). You've got
>
> one server process(*) and a bunch of clients which have all made TCP
>
> connections over individual sockets.
>
>
>
> Each client will be sending commands asynchronously, and the server
>
> needs to handle this. You need some way to figure out which of those
>
> sockets have something that's been sent to you (which you need to
>
> process) and which are just sitting idle. That's where select() comes
>
> in. It gives you a way to say, "Here's a list of sockets. Sleep until
>
> one of them has something available for me to read, and let me know
>
> which one."
>
>
>
> One bit of complication is that you can also check for sockets which are
>
> ready to be written on, and sockets which have some sort of error
>
> condition. That's why the call returns a 3-tuple. But, for now, let's
>
> just concentrate on reading.
>
>
>
> Here's a very simplistic server which uses select():
>
>
>
> import socket
>
> import select
>
>
>
> sock = socket.socket()
>
> sock.bind(('localhost', 23000))
>
> sock.listen(10)
>
>
>
> # Accept four connections.
>
> connections = []
>
> for i in range(4):
>
> s, addr = sock.accept()
>
> print "Got connection from %s" % str(addr)
>
> connections.append(s)
>
>
>
> while True:
>
> readable, _, _ = select.select(connections, [], [])
>
> print "ready for reading: %s" % readable
>
> for s in readable:
>
> data = s.recv(1024)
>
> print "Read from %s: %s" % (s, data)
>
>
>
> You can write a little client which connects to this (I've got one I
>
> used for testing, but I'll leave it to you to write one yourself as an
>
> exercise). Connect four clients, and have them send some input in
>
> random order.
>
>
>
> Actually, this server has a bug (which you'll discover as soon as you
>
> close one of the four connection), but it should serve to illustrate the
>
> basic concept.
>
>
>
>
>
> (*) I'm not sure if real MUDs are programmed this way, but it's a
>
> plausible architecture. For simplicity sake, I'm assuming a
>
> single-threaded server.
Thank you for your reply.your reply helped me figure out concept.
--
https://mail.python.org/mailman/listinfo/python-list
stuck at this from so much time,need help....please ..
data = sock.recv(RECV_BUFFER)
username = str(sock.getpeername())
username = usernames[username]
if command == "/quit":
print data
sock.send("bye")
sock.close()
CONNECTION_LIST.remove(sock)
even if the received data is '/quit', the if condition is not executing... please
help.
--
https://mail.python.org/mailman/listinfo/python-list
Re: stuck at this from so much time,need help....please ..
On Saturday, 23 November 2013 14:23:08 UTC-8, Chris Angelico wrote:
> On Sun, Nov 24, 2013 at 9:15 AM, Bhanu Karthik
>
> wrote:
>
> > data = sock.recv(RECV_BUFFER)
>
> > username = str(sock.getpeername())
>
> > username = usernames[username]
>
> > if command == "/quit":
>
> > print data
>
> > sock.send("bye")
>
> > sock.close()
>
> > CONNECTION_LIST.remove(sock)
>
> >
>
> > even if the received data is '/quit' the if condition not excuting...please
> > help.
>
>
>
> At what point is command set? You're setting data here; is command
>
> supposed to be derived from data?
>
>
>
> This looks like a MUD or IRC style of server, which would suggest that
>
> commands are terminated by end-of-line. You may need to take content
>
> from the socket (currently in data) and split it off on either "\r\n"
>
> or "\n". But it's hard to tell from this small snippet.
>
>
>
> ChrisA
sorry its not command its data
I miss wrote it here...
--
https://mail.python.org/mailman/listinfo/python-list
Re: stuck at this from so much time,need help....please ..
On Saturday, 23 November 2013 14:23:08 UTC-8, Chris Angelico wrote:
> On Sun, Nov 24, 2013 at 9:15 AM, Bhanu Karthik
>
> wrote:
>
> > data = sock.recv(RECV_BUFFER)
>
> > username = str(sock.getpeername())
>
> > username = usernames[username]
>
> > if command == "/quit":
>
> > print data
>
> > sock.send("bye")
>
> > sock.close()
>
> > CONNECTION_LIST.remove(sock)
>
> >
>
> > even if the received data is '/quit' the if condition not excuting...please
> > help.
>
>
>
> At what point is command set? You're setting data here; is command
>
> supposed to be derived from data?
>
>
>
> This looks like a MUD or IRC style of server, which would suggest that
>
> commands are terminated by end-of-line. You may need to take content
>
> from the socket (currently in data) and split it off on either "\r\n"
>
> or "\n". But it's hard to tell from this small snippet.
>
>
>
> ChrisA
data = sock.recv(RECV_BUFFER)
username = str(sock.getpeername())
username = usernames[username]
if data == "/quit":
print data
sock.send("bye")
sock.close()
CONNECTION_LIST.remove(sock)
this is exact code..
it is not even entering the if ...
I tried ( c= (data is '/quit')if c)
when I print c, it's printing False. I don't understand what is
happening... please help.
--
https://mail.python.org/mailman/listinfo/python-list
Re: stuck at this from so much time,need help....please ..
On Saturday, 23 November 2013 14:37:09 UTC-8, Roy Smith wrote: > In article <[email protected]>, > > Bhanu Karthik wrote: > > > > > data = sock.recv(RECV_BUFFER) > > > username = str(sock.getpeername()) > > > username = usernames[username] > > > if data == "/quit": > > > print data > > > sock.send("bye") > > > sock.close() > > > CONNECTION_LIST.remove(sock) > > > > > > > > > this is exact code.. > > > it is not even entering the if ... > > > I tried ( c= (data is '/quit')if c) > > > > That can't be the exact code. What you posted is a syntax error because > > the line after the "if" statement isn't indented properly. indentation is correct when I trying to paste it here,it is showing like it is unindented. -- https://mail.python.org/mailman/listinfo/python-list
GAPI -- Sharing a post to Social Networking Pages from my App
Hi Experts, I am trying to post on facebook and google plus page from my application. I am using facebook-sdk an d I am able to post using local machine but I am not able to post from dev server. Can Anyone Please help me on this. Thanks, Karthik -- https://mail.python.org/mailman/listinfo/python-list
Re: GAPI -- Sharing a post to Social Networking Pages from my App
On Tuesday, March 22, 2016 at 9:54:53 AM UTC+5:30, Mark Lawrence wrote:
> On 22/03/2016 04:14, Karthik Reddy wrote:
> > Hi Experts,
> >
> > I am trying to post on facebook and google plus page from my application.
> > I am using facebook-sdk an d I am able to post using local machine but I am
> > not able to post from dev server.
> >
> > Can Anyone Please help me on this.
> >
> > Thanks,
> > Karthik
> >
>
> Please state your OS and Python version, the code that you've tried and
> exactly what went wrong, including the full traceback if there is one.
>
> --
> My fellow Pythonistas, ask not what our language can do for you, ask
> what you can do for our language.
>
> Mark Lawrence
Hi Lawrence Thank you for your quick reply .I am using Ubuntu 14.04 and python
2.7.6 version.
In Template I have written
publish
{% block google_script %}
window.___gcfg = {
lang: 'en',
parsetags: 'onload',
isSignedOut: true
};
// var sour = document.getElementById("1");
// // console.log(sour)
// var idd=document.getElementById("1").id;
// console.log(idd)
function(){ window.open = $("#sharePost").click(); };
https://apis.google.com/js/platform.js"</a>; async defer>
{% endblock %}
{% block share %}
var options = {
contenturl: '<a rel="nofollow" href="http://localhost:8000">http://localhost:8000</a>',
clientid: 'X.apps.googleusercontent.com',
cookiepolicy: 'single_host_origin',
prefilltext: 'New project is added',
calltoactionurl : '<a rel="nofollow" href="https://plus.google.com/u/0/112729364286841783635/posts">https://plus.google.com/u/0/112729364286841783635/posts</a>'
};
// Call the render method when appropriate within your app to display
// the button.
gapi.interactivepost.render('sharePost', options);
console.log(new Error().stack);
{% endblock %}
{% block google_style %}
iframe[src^="<a rel="nofollow" href="https://apis.google.com/u/0/_/widget/oauthflow/toast"">https://apis.google.com/u/0/_/widget/oauthflow/toast"</a>;] {
display: none;
}
{% endblock %}
--
https://mail.python.org/mailman/listinfo/python-list
Re: GAPI -- Sharing a post to Social Networking Pages from my App
On Tuesday, March 22, 2016 at 9:54:53 AM UTC+5:30, Mark Lawrence wrote: > On 22/03/2016 04:14, Karthik Reddy wrote: > > Hi Experts, > > > > I am trying to post on facebook and google plus page from my application. > > I am using facebook-sdk an d I am able to post using local machine but I am > > not able to post from dev server. > > > > Can Anyone Please help me on this. > > > > Thanks, > > Karthik > > > > Please state your OS and Python version, the code that you've tried and > exactly what went wrong, including the full traceback if there is one. > > -- > My fellow Pythonistas, ask not what our language can do for you, ask > what you can do for our language. > > Mark Lawrence The error I am getting is "Uncaught ReferenceError: gapi is not defined" -- https://mail.python.org/mailman/listinfo/python-list
Re: GAPI -- Sharing a post to Social Networking Pages from my App
On Tuesday, March 22, 2016 at 4:48:37 PM UTC+5:30, Steven D'Aprano wrote: > On Tue, 22 Mar 2016 09:37 pm, Karthik Reddy wrote: > > > The error I am getting is "Uncaught ReferenceError: gapi is not defined" > > > Have you tried googling for it? That's a Javascript error: > > https://duckduckgo.com/html/?q=uncaught+reference+error+gapi+is+not+defined > > > > -- > Steven Yup I googled it I am not getting where I need to modify -- https://mail.python.org/mailman/listinfo/python-list
Re: GAPI -- Sharing a post to Social Networking Pages from my App
On Tuesday, March 22, 2016 at 4:55:40 PM UTC+5:30, Karthik Reddy wrote: > On Tuesday, March 22, 2016 at 4:48:37 PM UTC+5:30, Steven D'Aprano wrote: > > On Tue, 22 Mar 2016 09:37 pm, Karthik Reddy wrote: > > > > > The error I am getting is "Uncaught ReferenceError: gapi is not defined" > > > > > > Have you tried googling for it? That's a Javascript error: > > > > https://duckduckgo.com/html/?q=uncaught+reference+error+gapi+is+not+defined > > > > > > > > -- > > Steven > Yup I googled it I am not getting where I need to modify Is my approach is correct or if not Is there any other approach Please help me with this. -- https://mail.python.org/mailman/listinfo/python-list
Re: GAPI -- Sharing a post to Social Networking Pages from my App
On Tuesday, March 22, 2016 at 4:48:37 PM UTC+5:30, Steven D'Aprano wrote: > On Tue, 22 Mar 2016 09:37 pm, Karthik Reddy wrote: > > > The error I am getting is "Uncaught ReferenceError: gapi is not defined" > > > Have you tried googling for it? That's a Javascript error: > > https://duckduckgo.com/html/?q=uncaught+reference+error+gapi+is+not+defined > > > > -- > Steven Hi I resolved the error but I am not able to post my post . I tried https://developers.google.com/+/domains/authentication/#authorizing_requests_with_oauth_20 I am getting the below error When i copied the url. Error: invalid_client The OAuth client was not found. Even i gave Client_id correctly Thanks, Karthik -- https://mail.python.org/mailman/listinfo/python-list
Re: GAPI -- Sharing a post to Social Networking Pages from my App
On Wednesday, March 23, 2016 at 9:43:21 AM UTC+5:30, Karthik Reddy wrote: > On Tuesday, March 22, 2016 at 4:48:37 PM UTC+5:30, Steven D'Aprano wrote: > > On Tue, 22 Mar 2016 09:37 pm, Karthik Reddy wrote: > > > > > The error I am getting is "Uncaught ReferenceError: gapi is not defined" > > > > > > Have you tried googling for it? That's a Javascript error: > > > > https://duckduckgo.com/html/?q=uncaught+reference+error+gapi+is+not+defined > > > > > > > > -- > > Steven > > Hi I resolved the error but I am not able to post my post . > I tried > https://developers.google.com/+/domains/authentication/#authorizing_requests_with_oauth_20 > > I am getting the below error When i copied the url. > > Error: invalid_client > > The OAuth client was not found. > > Even i gave Client_id correctly > > Thanks, > Karthik Hi Steven I resolved this error also Now I am getting a pop up for google sharing but If click on share button its not sharing.Any help please -- https://mail.python.org/mailman/listinfo/python-list
Beginner
Hi all
I am a Python beginner. I am trying the code below and getting an "invalid
syntax" error in Python (3.3.2).
# Number-guessing loop, ported to Python 3 (the version the poster runs,
# 3.3.2): raw_input() no longer exists on Python 3 -- it was renamed to
# input() -- which is what broke the original script.  Indentation lost
# in the email paste has been restored.
number = 23
running = True
while running:
    guess = int(input('Enter an integer : '))
    if guess == number:
        print("Congratulations, you guessed it.")
        running = False  # this causes the while loop to stop
    elif guess < number:
        print("No, it is a little higher than that.")
    else:
        print("No, it is a little lower than that.")
else:
    # A while-loop's else clause runs once the condition becomes false
    # (i.e. the loop ended without a 'break').
    print("The while loop is over.")
    # Do anything else you want to do here
print("Done")
Please help me with this
Thank you in advance
--
https://mail.python.org/mailman/listinfo/python-list
Re: Beginner
Now Its working Thanks a lot Steven -- https://mail.python.org/mailman/listinfo/python-list
python
I worked as a WebLogic administrator, and now I am moving into development; I am very much interested in Python. Please suggest what things I need to learn, beyond Python itself, to get an IT job. I have come across Django, but I am confused — please help me. -- https://mail.python.org/mailman/listinfo/python-list
Re: python
Thank you, but from by reaserch i got these requirements .. Python, django, Twisted, MySQL, PyQt, PySide, xPython. *Technical proficiency with Python and Django. *Technical proficiency in JavaScript. *Experience with MySQL / PgSQL. *Unix/Linux expertise. *Experience with MVC design patterns and solid algorithm skills. Core Python, DJango Framework, Web2Py, Google App engine, CherryPy ( Basic Introduction) The problem for me is whether i have to learn all these technologies to work as a python developer.. On Tuesday, February 25, 2014 12:58:15 AM UTC+5:30, CM wrote: > On Monday, February 24, 2014 3:31:11 AM UTC-5, Karthik Reddy wrote: > > > I worked as a weblogic administrator and now i am changing to development > > and i am very much interested in python . please suggest me what > > are the things i need to learn more rather than python to get an I.T job. > > I came to know about Django but i am in a confusion please help me > > . > > > > I recommend you look at job advertisements in areas you'd like to work (both > > areas of the world and areas within IT) and see what they seem to want. > > > > Also, consider more informative subject lines to future posts. :D -- https://mail.python.org/mailman/listinfo/python-list
Re: python
On Monday, February 24, 2014 2:01:11 PM UTC+5:30, Karthik Reddy wrote: > I worked as a weblogic administrator and now i am changing to development and > i am very much interested in python . please suggest me what are the > things i need to learn more rather than python to get an I.T job. I came to > know about Django but i am in a confusion please help me . Thank you for guidance -- https://mail.python.org/mailman/listinfo/python-list
Python JSON processing - extra data error
I have the following python program to read a set of JSON files do some
processing on it and dump them back to the same folder. However When I run the
below program and then try to see the output of the JSON file using
`cat file.json | python -m json.tool`
I get the following error
`extra data: line 1 column 307 - line 1 column 852 (char 306 - 851)`
What is wrong with my program?
# Process 'new' events: pull extra fields out of the 'Ms' message blob
# and write the enriched JSON back to the same file, in place.
rootDir = '/home/s_parts'
for dirName, subdirList, fileList in os.walk(rootDir):
    print('Found directory: %s' % dirName)
    for fname in fileList:
        # Join against the directory actually being walked; the original
        # hard-coded 's_parts/' prefix only resolved when the current
        # working directory happened to contain it.
        path = os.path.join(dirName, fname)
        with open(path, 'r+') as f:
            json_data = json.load(f)
            et = json_data['Et']
            ms = json_data['Ms']
            # BUG FIX: the original compared an undefined name 'event';
            # the event type read from the file is 'et'.
            if et in ('a.b.c.d', 'e.f.g.h'):
                url = re.sub('.+roxy=([^& ]*).*', r'\1', ms)
                nt = re.findall(r"NT:\s*([^,)]*)", ms)[0]
                bt = re.findall(r"BT:\s*([^,)]*)", ms)[0]
                xt = re.findall(r"XT:\s*([^,)]*)", ms)[0]
                # BUG FIX: str.split('') raises ValueError (empty separator);
                # whitespace splitting is presumably what was intended --
                # TODO confirm against a real 'Ms' sample.
                appde = ms.split('Appde:')[1].strip().split()[0]
                version = ms.split('version:')[1].strip().split()[0]
                json_data["url"] = url
                json_data["BT"] = bt
                json_data["XT"] = xt
                json_data["NT"] = nt
                json_data["Appde"] = appde
                json_data["version"] = version
            else:
                json_data["url"] = "null"
                json_data["BT"] = "null"
                json_data["XT"] = "null"
                json_data["NT"] = "null"
                json_data["Appde"] = "null"
                json_data["version"] = "null"
            # BUG FIX for the reported "extra data" error: after json.load()
            # the file position sits at EOF, so json.dump() *appended* a
            # second JSON document ("no line terminators, very long lines").
            # Rewind and truncate so the file holds exactly one document.
            f.seek(0)
            json.dump(json_data, f)
            f.truncate()
If I do a `file` command on the output file I get
`events_parts/data_95: ASCII text, with very long lines, with no line
terminators`
--
https://mail.python.org/mailman/listinfo/python-list
Error in processing JSON files in Python
I have the following python program to read a set of JSON files do some
processing on it and dump them back to the same folder. However When I run the
below program and then try to see the output of the JSON file using
`cat file.json | python -m json.tool`
I get the following error
`extra data: line 1 column 307 - line 1 column 852 (char 306 - 851)`
What is wrong with my program?
# Process 'new' events: pull extra fields out of the 'Ms' message blob
# and write the enriched JSON back to the same file, in place.
rootDir = '/home/s_parts'
for dirName, subdirList, fileList in os.walk(rootDir):
    print('Found directory: %s' % dirName)
    for fname in fileList:
        # Join against the directory actually being walked; the original
        # hard-coded 's_parts/' prefix only resolved when the current
        # working directory happened to contain it.
        path = os.path.join(dirName, fname)
        with open(path, 'r+') as f:
            json_data = json.load(f)
            et = json_data['Et']
            ms = json_data['Ms']
            # BUG FIX: the original compared an undefined name 'event';
            # the event type read from the file is 'et'.
            if et in ('a.b.c.d', 'e.f.g.h'):
                url = re.sub('.+roxy=([^& ]*).*', r'\1', ms)
                nt = re.findall(r"NT:\s*([^,)]*)", ms)[0]
                bt = re.findall(r"BT:\s*([^,)]*)", ms)[0]
                xt = re.findall(r"XT:\s*([^,)]*)", ms)[0]
                # BUG FIX: str.split('') raises ValueError (empty separator);
                # whitespace splitting is presumably what was intended --
                # TODO confirm against a real 'Ms' sample.
                appde = ms.split('Appde:')[1].strip().split()[0]
                version = ms.split('version:')[1].strip().split()[0]
                json_data["url"] = url
                json_data["BT"] = bt
                json_data["XT"] = xt
                json_data["NT"] = nt
                json_data["Appde"] = appde
                json_data["version"] = version
            else:
                json_data["url"] = "null"
                json_data["BT"] = "null"
                json_data["XT"] = "null"
                json_data["NT"] = "null"
                json_data["Appde"] = "null"
                json_data["version"] = "null"
            # BUG FIX for the reported "extra data" error: after json.load()
            # the file position sits at EOF, so json.dump() *appended* a
            # second JSON document ("no line terminators, very long lines").
            # Rewind and truncate so the file holds exactly one document.
            f.seek(0)
            json.dump(json_data, f)
            f.truncate()
If I do a `file` command on the output file I get
`s_parts/data_95: ASCII text, with very long lines, with no line terminators`
--
https://mail.python.org/mailman/listinfo/python-list
Decoding JSON file using python
I have the JSON structure shown below and the python code shown below to
manipulate the JSON structure.
import json

# NOTE(review): the "data" value is itself a JSON-*encoded string* (note
# the escaped quotes) whose payload is a list: "[{...}]".  The literal
# below also appears truncated/wrapped by the mail archive (it ends in
# '\\\">'), so it is not valid Python exactly as pasted.
json_input = {
"msgType": "0",
"tid": "1",
"data": "[{\"Severity\":\"warn\",\"Subject\":\"Reporting
\",\"Message\":\"tdetails:{\\\"Product\\\":\\\"Gecko\\\",\\\"CPUs\\\":8,\\\"Language\\\":\\\"en-GB\\\",\\\"isEvent\\\":\\\">
"Timestamp": "1432703193431",
"Host": "myserver.com",
"Agent": "Experience",
"AppName": "undefined",
"AppInstance": "my_server",
"Group": "UndefinedGroup"
}

# 'data' is a str here, so data['Message'] raises the reported
# "TypeError: string indices must be integers".  It must first be decoded
# with json.loads(data), which yields a list of dicts -- then the fields
# are reached via decoded[0]['Message'] etc.
data = json_input['data']
tdetails = data['Message']
print('json_input {} \n\ndata {} \n\n tdetails
{}\n\n'.format(json_input,data,tdetails))
I am getting the error.
Traceback (most recent call last):
File "test_json.py", line 19, in
tdetails = data['Message']
TypeError: string indices must be integers, not str
The JSON structure is valid as shown by http://jsonlint.com/
I want to be able to access the different fields inside `data` such as
`severity`, `subject` and also fields inside `tdetails` such as `CPUs` and
`Product`. How do I do this?
--
https://mail.python.org/mailman/listinfo/python-list
Re: Decoding JSON file using python
I tried modifying the program as follows as per your suggestion.Doesn't seem to
work.
import simplejson as json
import cjson

# NOTE(review): the "data" literal below was truncated/wrapped by the
# mail archive (it ends in '\\\">'), so this is not valid Python exactly
# as pasted.  The payload it encodes is a JSON *list*: "[{...}]".
json_input = { "msgType": "0",
"tid": "1",
"data": "[{\"Severity\":\"warn\",\"Subject\":\"Reporting
\",\"Message\":\"tdetails:{\\\"Product\\\":\\\"Gecko\\\",\\\"CPUs\\\":8,\\\"Language\\\":\\\"en-GB\\\",\\\"isEvent\\\":\\\">
"Timestamp": "1432703193431",
"Host": "myserver.com",
"Agent": "Experience",
"AppName": "undefined",
"AppInstance": "my_server",
"Group": "UndefinedGroup"
}
print('json input original {} \n\n'.format(json_input))
data = json_input['data']
print('data {} \n\n'.format(data))
# json.loads(data) returns a list of dicts (the payload is "[{...}]"),
# so message['Message'] raises the reported "TypeError: list indices must
# be integers" -- an element must be selected first: message[0]['Message'].
message = json.loads(data)
print('message {} \n\n'.format(message['Message']))
I get the following error.
Traceback (most recent call last):
File "test_json.py", line 23, in
print('message {} \n\n'.format(message['Message']))
TypeError: list indices must be integers, not str
karthik.sharma@aukksharma2:~$ vim test_json.py
On Thursday, 28 May 2015 11:14:44 UTC+12, Cameron Simpson wrote:
> On 27May2015 15:23, Karthik Sharma wrote:
> >I have the JSON structure shown below and the python code shown below to
> >manipulate the JSON structure.
> >
> >import json
> >
> >json_input = {
> >"msgType": "0",
> >"tid": "1",
> >"data": "[{\"Severity\":\"warn\",\"Subject\":\"Reporting
> > \",\"Message\":\"tdetails:{\\\"Product\\\":\\\"Gecko\\\",\\\"CPUs\\\":8,\\\"Language\\\":\\\"en-GB\\\",\\\"isEvent\\\":\\\">
> >"Timestamp": "1432703193431",
> >"Host": "myserver.com",
> >"Agent": "Experience",
> >"AppName": "undefined",
> >"AppInstance": "my_server",
> >"Group": "UndefinedGroup"
> >}
> >
> >
> >data = json_input['data']
> >tdetails = data['Message']
> >print('json_input {} \n\ndata {} \n\n tdetails
> > {}\n\n'.format(json_input,data,tdetails))
> >
> >I am getting the error.
> >
> >Traceback (most recent call last):
> > File "test_json.py", line 19, in
> >tdetails = data['Message']
> >TypeError: string indices must be integers, not str
>
> That will be because of this line:
>
> tdetails = data['Message']
>
> because "data" is just the string from your dict.
>
> >The JSON structure is valid as shown by http://jsonlint.com/
> > I want to be able to access the different fields inside `data` such as
> > `severity`, `subject` and also fields inside `tdetails` such as `CPUs` and
> > `Product`. How do I do this?
>
> Then you need to decode "data". Example (untested):
>
> data_decoded = json.loads(data)
>
> and then access:
>
> data_decoded['Message']
>
> Cheers,
> Cameron Simpson
>
> Here's a great .sig I wrote, so good it doesn't rhyme.
> Jon Benger
--
https://mail.python.org/mailman/listinfo/python-list
Plotting timeseries from a csv file using matplotlib
I have some csv data in the following format.
Ln Dr Tag Lab 0:01 0:02 0:03 0:04 0:05 0:06 0:07
0:08 0:09
L0 St vT 4R 0 0 00 0
0 00 0
L2 Tx st 4R 8 8 88 8
888 8
L2 Tx ss 4R 1 1 96 1
006 7
I want to plot a timeseries graph using the columns (`Ln` , `Dr`, `Tg`,`Lab`)
as the keys and the `0:0n ` field as values on a timeseries graph.I want all
the timeseries to be plotted on a single timeseries?
I have the following code.
#!/usr/bin/env python
# Plot one timeseries per (Ln, Dr, Tag, Lab) key from test.csv; the
# remaining "0:01", "0:02", ... columns are the per-timestamp values.
import matplotlib.pyplot as plt
import datetime
import numpy as np
import csv
import sys

# Columns that identify a series; every other header column is a value.
KEY_FIELDS = ('Ln', 'Dr', 'Tag', 'Lab')

# NOTE(review): the sample rows look whitespace-separated, not
# comma-separated -- if so, DictReader needs an explicit delimiter;
# confirm against the real file.
with open("test.csv", 'r', newline='') as fin:
    reader = csv.DictReader(fin)
    for row in reader:
        # BUG FIX: the sample header names this column 'Tag', not 'Tg'
        # (row['Tg'] would raise KeyError).
        key = tuple(row[k] for k in KEY_FIELDS)
        # Take every non-key column from the header, in order, instead of
        # naming 0:01 ... 0:09 individually.
        time_cols = [c for c in reader.fieldnames if c not in KEY_FIELDS]
        values = [float(row[c]) for c in time_cols]
        plt.plot(time_cols, values, label='/'.join(key))

plt.legend()
plt.show()
~
How do I extract all the values in columns `0:0n` without individually
specifying each one of them?
--
https://mail.python.org/mailman/listinfo/python-list
Plotting a timeseris graph from pandas dataframe using matplotlib
I have the following data in a csv file
SourceIDBSs hour Type
7208 87 11MAIN
11060 67 11MAIN
3737 88 11MAIN
9683 69 11MAIN
9276 88 11MAIN
7754 62 11MAIN
1 80 12MAIN
9276 88 12MAIN
1 80 12MAIN
6148 70 12MAIN
1 80 12MAIN
9866 80 12SUB
9866 78 13MAIN
9866 78 13SUB
20729 82 14MAIN
9276 88 14MAIN
1 80 15MAIN
20190 55 15MAIN
7208 85 15MAIN
7208 86 15MAIN
7754 61 16MAIN
8968 91 16MAIN
3737 88 16MAIN
9683 69 16MAIN
20729 81 16MAIN
9704 68 16MAIN
1 87 16PAN
I have the following python code.I want to plot a graph with the following
specifications.
For each `SourceID` and `Type` I want to plot a graph of `BSs` over time. I
would prefer if each `SourceID` and `Type` is a subplot on single plot.I have
tried a lot of options using groupby, but can't seem to get it work.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

COLLECTION = 'NEW'
DATA = r'C:\Analysis\Test\{}'.format(COLLECTION)
INPUT_FILE = DATA + r'\in.csv'
OUTPUT_FILE = DATA + r'\out.csv'

# BUG FIX: the original opened INPUT_FILE with `with open(...)` but then
# handed the *path* to read_csv, so the file was opened twice and the
# `fin` handle was never used.  Give pandas the open handle instead.
with open(INPUT_FILE) as fin:
    df = pd.read_csv(fin,
                     usecols=["SourceID", 'hour', 'BSs', 'Type'],
                     header=0)

df.drop_duplicates(inplace=True)
df.reset_index(inplace=True)
--
https://mail.python.org/mailman/listinfo/python-list
ZMQError: Resource temporarily unavailable
I am using zero-mq for IPC between two machines.
My zmq function is given below
def recieve_messages(self):
    """Poll the SUB socket once without blocking.

    With flags=zmq.NOBLOCK, recv() raises zmq.ZMQError with
    errno == zmq.EAGAIN ("Resource temporarily unavailable") whenever no
    message is currently queued -- the exact error reported.  That is the
    normal "nothing to read yet" case for a non-blocking poll, so it is
    handled here instead of crashing the caller.

    Returns the received message, or None when no message was waiting.
    """
    try:
        string = self.sub_socket.recv(flags=zmq.NOBLOCK)
    except zmq.ZMQError as exc:
        # EAGAIN just means "no message queued yet" -- not a failure.
        if exc.errno == zmq.EAGAIN:
            return None
        raise
    print('flow mod messages recieved {}'.format(string))
    return string
When I run the program however I get the following error.
string = self.sub_socket.recv(flags=zmq.NOBLOCK)
File "socket.pyx", line 616, in zmq.core.socket.Socket.recv
(zmq/core/socket.c:5961)
File "socket.pyx", line 650, in zmq.core.socket.Socket.recv
(zmq/core/socket.c:5832)
File "socket.pyx", line 119, in zmq.core.socket._recv_copy
(zmq/core/socket.c:1669)
ZMQError: Resource temporarily unavailable
Can someone explain what is likely causing this error?
--
https://mail.python.org/mailman/listinfo/python-list
RE : Gnuplot
Hello, I am having a few issues interfacing gnuplot with python. When I try to
run the demo.py file I get the following error messages:
Traceback (most recent call last):
File "C:\Program Files\Common
Files\dSPACE\Python25\lib\site-packages\Pythonwin\pywin\framework\scriptutils.py",
line 310, in RunScript
exec codeObject in __main__.__dict__
File "C:\Program Files\Common
Files\dSPACE\Python25\Lib\site-packages\Gnuplot\demo.py", line 110, in
demo()
File "C:\Program Files\Common
Files\dSPACE\Python25\Lib\site-packages\Gnuplot\demo.py", line 36, in demo
g.reset()
File "C:\Program Files\Common
Files\dSPACE\Python25\Lib\site-packages\Gnuplot\_Gnuplot.py", line 366, in reset
self('reset')
File "C:\Program Files\Common
Files\dSPACE\Python25\Lib\site-packages\Gnuplot\_Gnuplot.py", line 210, in
__call__
self.gnuplot(s)
File "C:\Program Files\Common
Files\dSPACE\Python25\lib\site-packages\Gnuplot\gp_win32.py", line 130, in
__call__
self.write(s + '\n')
IOError: [Errno 22] Invalid argument
After reading other users having similar problems, I altered the
'gnuplot_command' line in gp_win32.py to 'gnuplot_command = "c:\Program
Files\wgnuplot\bin\pgnuplot.exe", in an attempt to directly get the
pgnuplot.exe(which I believe is the root of the problem). However, I still get
the same error message. Any ideas on how to solve this?
Thanks
Note: If the reader of this message is not the intended recipient, or an
employee or agent responsible for delivering this message to the intended
recipient, you are hereby notified that any dissemination, distribution or
copying of this communication is strictly prohibited. If you have received this
communication in error, please notify us immediately by replying to the message
and deleting it from your computer. Thank you.
--
http://mail.python.org/mailman/listinfo/python-list
query from sqlalchemy returns AttributeError: 'NoneType' object
.
if not packet.parsed:
log.warning("Ignoring incomplete packet")
return
packet_in = event.ofp # The actual ofp_packet_in message.
#self.act_like_hub(packet, packet_in)
self.act_like_switch(packet, packet_in)
def launch ():
    """Start the component.

    Registers a ConnectionUp listener that attaches a Tutorial controller
    to each switch as it connects.
    """
    def _on_connection_up (event):
        # One Tutorial instance per newly connected switch.
        log.debug("Controlling %s" % (event.connection,))
        Tutorial(event.connection)
    core.openflow.addListenerByName("ConnectionUp", _on_connection_up)
When I run the above code I get the following error:
The problem that I am facing is for some reason if I use
if session.query(exists().where(SourcetoPort.src_address ==
str(packet.dst))).scalar() is not None:
in place of count query.
#if
session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).count():
The querying from the database
q_res =
session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).first()
self.send_packet(packet_in.buffer_id, packet_in.data,q_res.port_no,
packet_in.in_port)
is giving the following error:
DEBUG:core:POX 0.1.0 (betta) going up...
DEBUG:core:Running on CPython (2.7.3/Aug 1 2012 05:14:39)
DEBUG:core:Platform is
Linux-3.5.0-23-generic-x86_64-with-Ubuntu-12.04-precise
INFO:core:POX 0.1.0 (betta) is up.
DEBUG:openflow.of_01:Listening on 0.0.0.0:6633
INFO:openflow.of_01:[00-00-00-00-00-02 1] connected
DEBUG:tutorial:Controlling [00-00-00-00-00-02 1]
got info from the database
ERROR:core:Exception while handling Connection!PacketIn...
Traceback (most recent call last):
File "/home/karthik/pox/pox/lib/revent/revent.py", line 234, in
raiseEventNoErrors
return self.raiseEvent(event, *args, **kw)
File "/home/karthik/pox/pox/lib/revent/revent.py", line 281, in raiseEvent
rv = event._invoke(handler, *args, **kw)
File "/home/karthik/pox/pox/lib/revent/revent.py", line 159, in _invoke
return handler(self, *args, **kw)
File "/home/karthik/pox/tutorial.py", line 118, in _handle_PacketIn
self.act_like_switch(packet, packet_in)
File "/home/karthik/pox/tutorial.py", line 86, in act_like_switch
self.send_packet(packet_in.buffer_id, packet_in.data,q_res.port_no,
packet_in.in_port)
AttributeError: 'NoneType' object has no attribute 'port_no'
got info from the database
ERROR:core:Exception while handling Connection!PacketIn...
--
http://mail.python.org/mailman/listinfo/python-list
multiple python versions of a 3rd party application/libarary
hi, I maintain applications/libraries which I upgrade often at a different location. For example if I maintain mercurial at /opt/sfw/mercurial/0.9.3 I have PYTHONPATH set to /opt/sfw/mercurial/0.9.3/lib/python2.4/site-packages. How can I get python to look into python2.4 and python2.4/site-packages automatically (assuming it is 2.4) and likewise for 2.5. That way I can keep them under the same directory. /kk -- http://mail.python.org/mailman/listinfo/python-list
Re: object references/memory access
On Jul 1, 12:38 pm, dlomsak <[EMAIL PROTECTED]> wrote: > Thanks for the responses folks. I'm starting to think that there is > merely an inefficiency in how I'm using the sockets. The expensive > part of the program is definitely the socket transfer because I timed > each part of the routine individually. For a small return, the whole > search and return takes a fraction of a second. For a large return (in > this case 21,000 records - 8.3 MB) is taking 18 seconds. 15 of those > seconds are spent sending the serialized results from the server to > the client. I did a little bit of a blind experiment and doubled the > bytes on the client's socket.recv line. This improved the rate of > transfer each time. The original rate when I was accepting 1024 bytes > per recv took 47 seconds to send the 8.3 MB result. By doubling this > size several times, I reduced the time to 18 seconds until doubling it > further produced diminishing results. I was always under the > impression that keeping the send and recv byte sizes around 1024 is a > good idea and I'm sure that jacking those rates up is a lousy way to > mitigate the transfer. It is also interesting to note that increasing > the bytes sent per socket.send on the server side had no visible > effect. Again, that was just a curious experiment. > > What bothers me is that I am sure sending data over the local loopback > address should be blazing fast. 8.3 MB should be a breeze because I've > transferred files over AIM to people connected to the same router as > me and was able to send hundreds of megabytes in less than a two or > three seconds. With that said, I feel like something about how I'm > send/recv-ing the data is causing lots of overhead and that I can > avoid reading the memory directly if I can speed that up. > > I guess now I'd like to know what are good practices in general to get > better results with sockets on the same local machine. 
I'm only > instantiating two sockets total right now - one client and one server, > and the transfer is taking 15 seconds for only 8.3MB. If you guys have > some good suggestions on how to better utilize sockets to transfer > data at the speeds I know I should be able to achieve on a local > machine, let me know what you do. At present, I find that using > sockets in python requires very few steps so I'm not sure where I > could really improve at this point. > I have found the stop-and-go between two processes on the same machine leads to very poor throughput. By stop-and-go, I mean the producer and consumer are constantly getting on and off of the CPU since the pipe gets full (or empty for consumer). Note that a producer can't run at its top speed as the scheduler will pull it out since it's output pipe got filled up. When you increased the underlying buffer, you mitigated a bit this shuffling. And hence saw a slight increase in performance. My guess that you can transfer across machines at real high speed, is because there are no process swapping as producer and consumer run on different CPUs (machines, actually). Since the two processes are on the same machine, try using a temporary file for IPC. This is not as efficient as real shared memory -- but it does avoid the IPC stop-n-go. The producer can generate the multi-mega byte file at one go and inform the consumer. The file-systems have gone thru' decades of performance tuning that this job is done really efficiently. Thanks, Karthik > Thanks for the replies so far, I really appreciate you guys > considering my situation and helping out. -- http://mail.python.org/mailman/listinfo/python-list
Re: object references/memory access
On Jul 2, 3:01 pm, Steve Holden <[EMAIL PROTECTED]> wrote: > Karthik Gurusamy wrote: > > On Jul 1, 12:38 pm, dlomsak <[EMAIL PROTECTED]> wrote: > [...] > > > I have found the stop-and-go between two processes on the same machine > > leads to very poor throughput. By stop-and-go, I mean the producer and > > consumer are constantly getting on and off of the CPU since the pipe > > gets full (or empty for consumer). Note that a producer can't run at > > its top speed as the scheduler will pull it out since it's output pipe > > got filled up. > > But when both processes are in the memory of the same machine and they > communicate through an in-memory buffer, what's to stop them from > keeping the CPU fully-loaded (assuming they are themselves compute-bound)? If you are a producer and if your output goes thru' a pipe, when the pipe gets full, you can no longer run. Someone must start draining the pipe. On a single core CPU when only one process can be running, the producer must get off the CPU so that the consumer may start the draining process. > > > When you increased the underlying buffer, you mitigated a bit this > > shuffling. And hence saw a slight increase in performance. > > > My guess that you can transfer across machines at real high speed, is > > because there are no process swapping as producer and consumer run on > > different CPUs (machines, actually). > > As a concept that's attractive, but it's easy to demonstrate that (for > example) two machines will get much better throughput using the > TCP-based FTP to transfer a large file than they do with the UDP-based > TFTP. This is because the latter protocol requires the sending unit to > stop and wait for an acknowledgment for each block transferred. With > FTP, if you use a large enough TCP sliding window and have enough > content, you can saturate a link as ling as its bandwidth isn't greater > than your output rate. > > This isn't a guess ... 
What you say about a stop-n-wait protocol versus TCP's sliding window is correct. But I think it's totally orthogonal to the discussion here. The issue I'm talking about is how to keep the end nodes chugging along, if they are able to run simultaneously. They can't if they aren't on a multi- core CPU or one different machines. > > > Since the two processes are on the same machine, try using a temporary > > file for IPC. This is not as efficient as real shared memory -- but it > > does avoid the IPC stop-n-go. The producer can generate the multi-mega > > byte file at one go and inform the consumer. The file-systems have > > gone thru' decades of performance tuning that this job is done really > > efficiently. > > I'm afraid this comes across a bit like superstition. Do you have any > evidence this would give superior performance? > I did some testing before when I worked on boosting a shell pipeline performance and found using file-based IPC was very good. (some details at http://kar1107.blogspot.com/2006/09/unix-shell-pipeline-part-2-using.html ) Thanks, Karthik > >> Thanks for the replies so far, I really appreciate you guys > >> considering my situation and helping out. > > regards > Steve > -- > Steve Holden+1 571 484 6266 +1 800 494 3119 > Holden Web LLC/Ltd http://www.holdenweb.com > Skype: holdenweb http://del.icio.us/steve.holden > --- Asciimercial -- > Get on the web: Blog, lens and tag the Internet > Many services currently offer free registration > --- Thank You for Reading - -- http://mail.python.org/mailman/listinfo/python-list
Re: object references/memory access
On Jul 2, 6:32 pm, Steve Holden <[EMAIL PROTECTED]> wrote: > Karthik Gurusamy wrote: > > On Jul 2, 3:01 pm, Steve Holden <[EMAIL PROTECTED]> wrote: > >> Karthik Gurusamy wrote: > >>> On Jul 1, 12:38 pm, dlomsak <[EMAIL PROTECTED]> wrote: > >> [...] > > >>> I have found the stop-and-go between two processes on the same machine > >>> leads to very poor throughput. By stop-and-go, I mean the producer and > >>> consumer are constantly getting on and off of the CPU since the pipe > >>> gets full (or empty for consumer). Note that a producer can't run at > >>> its top speed as the scheduler will pull it out since it's output pipe > >>> got filled up. > >> But when both processes are in the memory of the same machine and they > >> communicate through an in-memory buffer, what's to stop them from > >> keeping the CPU fully-loaded (assuming they are themselves compute-bound)? > > > If you are a producer and if your output goes thru' a pipe, when the > > pipe gets full, you can no longer run. Someone must start draining the > > pipe. > > On a single core CPU when only one process can be running, the > > producer must get off the CPU so that the consumer may start the > > draining process. > > Wrong. The process doesn't "get off" the CPU, it remains loaded, and > will become runnable again once the buffer has been depleted by the > other process (which is also already loaded into memory and will become > runnable as soon as a filled buffer becomes available). > huh? "get off" when talking about scheduling and CPU implies you are not running. It is a common term to imply that you are not running -- doesn't mean it goes away from main memory. Sorry where did you learn your CS concepts? > > > >>> When you increased the underlying buffer, you mitigated a bit this > >>> shuffling. And hence saw a slight increase in performance. 
> >>> My guess that you can transfer across machines at real high speed, is > >>> because there are no process swapping as producer and consumer run on > >>> different CPUs (machines, actually). > >> As a concept that's attractive, but it's easy to demonstrate that (for > >> example) two machines will get much better throughput using the > >> TCP-based FTP to transfer a large file than they do with the UDP-based > >> TFTP. This is because the latter protocol requires the sending unit to > >> stop and wait for an acknowledgment for each block transferred. With > >> FTP, if you use a large enough TCP sliding window and have enough > >> content, you can saturate a link as ling as its bandwidth isn't greater > >> than your output rate. > > >> This isn't a guess ... > > > What you say about a stop-n-wait protocol versus TCP's sliding window > > is correct. > > But I think it's totally orthogonal to the discussion here. The issue > > I'm talking about is how to keep the end nodes chugging along, if they > > are able to run simultaneously. They can't if they aren't on a multi- > > core CPU or one different machines. > > If you only have one CPU then sure, you can only run one process at a > time. But your understanding of how multiple processes on the same CPU > interact is lacking. > huh? > > > > > >>> Since the two processes are on the same machine, try using a temporary > >>> file for IPC. This is not as efficient as real shared memory -- but it > >>> does avoid the IPC stop-n-go. The producer can generate the multi-mega > >>> byte file at one go and inform the consumer. The file-systems have > >>> gone thru' decades of performance tuning that this job is done really > >>> efficiently. > >> I'm afraid this comes across a bit like superstition. Do you have any > >> evidence this would give superior performance? > > > I did some testing before when I worked on boosting a shell pipeline > > performance and found using file-based IPC was very good. 
> > (some details > > athttp://kar1107.blogspot.com/2006/09/unix-shell-pipeline-part-2-using > > ) > > > Thanks, > > Karthik > > >>>> Thanks for the replies so far, I really appreciate you guys > >>>> considering my situation and helping out. > > If you get better performance by writing files and reading them instead > of using pipes to communicate then something is wrong. > Why don't you provide a better explanation for the obse
Re: object references/memory access
On Jul 2, 10:57 pm, "Martin v. Löwis" <[EMAIL PROTECTED]> wrote: > >>> I have found the stop-and-go between two processes on the same machine > >>> leads to very poor throughput. By stop-and-go, I mean the producer and > >>> consumer are constantly getting on and off of the CPU since the pipe > >>> gets full (or empty for consumer). Note that a producer can't run at > >>> its top speed as the scheduler will pull it out since it's output pipe > >>> got filled up. > > > On a single core CPU when only one process can be running, the > > producer must get off the CPU so that the consumer may start the > > draining process. > > It's still not clear why you say that the producer can run "at its top > speed". You seem to be suggesting that in such a setup, the CPU would > be idle, i.e. not 100% loaded. Assuming that the consumer won't block > for something else, then both processes will run at their "top speed". > Of course, for two processes running at a single CPU, the top speed > won't be the MIPs of a single processor, as they have to share the CPU. > > So when you say it leads to very poor throughput, I ask: compared > to what alternative? Let's assume two processes P and C. P is the producer of data; C, the consumer. To answer your specific question, compared to running P to completion and then running C to completion. The less optimal way is p1-->c1-- >p2-->c2-->. p_n---c_n where p1 is a time-slice when P is on CPU, c1 is a time-slice when c1 is on CPU. If the problem does not require two way communication, which is typical of a producer-consumer, it is a lot faster to allow P to fully run before C is started. If P and C are tied using a pipe, in most linux like OS (QNX may be doing something really smart as noted by John Nagle), there is a big cost of scheduler swapping P and C constantly to use the CPU. You may ask why? because the data flowing between P and C, has a small finite space (the buffer). 
Once P fills it; it will block -- the scheduler sees C is runnable and puts C on the CPU. Thus even if CPU is 100% busy, useful work is not 100%; the process swap overhead can kill the performance. When we use an intermediate file to capture the data, we allow P to run a lot bigger time-slice. Assuming huge file-system buffering, it's very much possible P gets one-go on the CPU and finishes it's job of data generation. Note that all these become invalid, if you have a more than one core and the scheduler can keep both P and C using two cores simulateanously. If that is the case, we don't incur this process-swap overhead and we may not see the stop-n-go performance drop. Thanks, Karthik > > Regards, > Martin -- http://mail.python.org/mailman/listinfo/python-list
Re: object references/memory access
On Jul 3, 2:33 pm, "Martin v. Löwis" <[EMAIL PROTECTED]> wrote: > > If the problem does not require two way communication, which is > > typical of a producer-consumer, it is a lot faster to allow P to fully > > run before C is started. > > Why do you say it's *a lot* faster. I find that it is a little faster. > The only additional overhead from switching forth and back between > consumer and producer is the overhead for context switching, which > is typically negligible, compared to everything else that is going > on. True it needn't be *a lot*. I did observe 25% gain or more when there were a chain of processes involved as in a shell pipeline. Again this could be very problem specific. What I had, included something like 4 or 5 processes connected as in p1 | p2 | p3 | p4 ... here I found the back-n-forth context switching was slowing down quite a bit (some thing like 2 mins task completed in under 40 seconds without the piping) If all you had is just two processes, P and C and the amount of data flowing is less (say on the order of 10's of buffer-size ... e.g. 20 times 4k), *a lot* may not be right quantifier. But if the data is large and several processes are involved, I am fairly sure the overhead of context-switching is very significant (not negligible) in the final throughput. Thanks, Karthik > > Regards, > Martin -- http://mail.python.org/mailman/listinfo/python-list
Re: using subprocess for non-terminating command
On Jul 4, 4:38 am, Phoe6 <[EMAIL PROTECTED]> wrote:
> Hi all,
> Consider this scenario, where in I need to use subprocess to execute a
> command like 'ping 127.0.0.1' which will have a continuous non-
> terminating output in Linux.
>
> # code
>
> >>>import subprocess
> >>>process = subprocess.Popen('ping 127.0.0.1', shell=True,
> >>>stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
> >>>print process.pid # returns pid
> >>>print process.poll() # returns None!!! this is strange.
It's expected behavior. It means the child process is still running.
> >>>print process.stdout.read()
>
> # This hangs at this point.
This too is expected behavior. 'ping <host>' runs forever,
generating continuous output. It doesn't stop by itself.
read() reads until end-of-file on the file object. Thus it
never finishes, since the child never stops generating data.
If you do read(n), then it reads n bytes and returns.
> How should I handle these kind of commands (ping 127.0.0.1) with
> subprocess module. I am using subprocess, instead of os.system because
> at anypoint in time, I need access to stdout and stderr of execution.
>
Using subprocess is good. Just ensure your child stops data generation
at some point. For ping, you can use '-c <count>'; for some other
applications, you can try closing their stdin (e.g. cat, bc, gdb).
Thanks,
Karthik
> Thanks,
> Senthil
--
http://mail.python.org/mailman/listinfo/python-list
In a dynamic language, why % operator asks user for type info?
Hi, The string format operator, %, provides a functionality similar to the snprintf function in C. In C, the function does not know the type of each of the argument and hence relies on the embedded % specifier to guide itself while retrieving args. In python, the language already provides ways to know the type of an object. So in output = '%d foo %d bar" % (foo_count, bar_count), why we need to use %d? I'm thinking some general common placeholder, say %x (currently it's hex..) could be used. output = '%x foo %x bar" % (foo_count, bar_count). Since % by definition is string formatting, the operator should be able to infer how to convert each of the argument into strings. If the above is the case, we could've avoided all those exceptions that happen when a %d is specified but say a string is passed. Thanks, Karthik -- http://mail.python.org/mailman/listinfo/python-list
Re: In a dynamic language, why % operator asks user for type info?
On Jul 16, 5:18 pm, Dan Bishop <[EMAIL PROTECTED]> wrote: > On Jul 16, 7:10 pm, Karthik Gurusamy <[EMAIL PROTECTED]> wrote:> Hi, > > > The string format operator, %, provides a functionality similar to the > > snprintf function in C. In C, the function does not know the type of > > each of the argument and hence relies on the embedded % > > specifier to guide itself while retrieving args. > > > In python, the language already provides ways to know the type of an > > object. > > > So in > > > output = '%d foo %d bar" % (foo_count, bar_count), > > why we need to use %d? > > In order to distinguish between, for example: > > > > >>> '%c' % 42 > '*' > >>> '%d' % 42 > '42' > >>> '%e' % 42 > '4.20e+01' > >>> '%f' % 42 > '42.00' > >>> '%g' % 42 > '42' > >>> '%i' % 42 > '42' > >>> '%o' % 42 > '52' > >>> '%r' % 42 > '42' > >>> '%s' % 42 > '42' Thanks. The above surprised me as I didn't expect that %s will accept 42. Looks like the implicit conversion doesn't work the other way. >>> '%s' % 42 '42' >>> '%d' % '42' Traceback (most recent call last): File "", line 1, in TypeError: int argument required >>> Looks like %s can be used even when I'm sending non-strings. >>> '%s foo %s bar' % (25, 25.34) '25 foo 25.34 bar' >>> So %s seems to serve the multi-type placeholder. Karthik > >>> '%u' % 42 > '42' > >>> '%x' % 42 > > '2a' -- http://mail.python.org/mailman/listinfo/python-list
Re: a=0100; print a ; 64 how to reverse this?
On Jul 17, 5:35 am, Bruno Desthuilliers wrote:
> mosi a écrit :
>
>
>
> > Problem:
> > how to get binary from integer and vice versa?
> > The simplest way I know is:
> > a = 0100
> > a
> > 64
>
> > but:
> > a = 100 (I want binary number)
> > does not work that way.
>
> > a.__hex__ exists
> > a.__oct__ exists
>
> > but where is a.__bin__ ???
>
> > What`s the simplest way to do this?
>
> [EMAIL PROTECTED]:~$ python
> Python 2.5.1 (r251:54863, May 2 2007, 16:56:35)
> [GCC 4.1.2 (Ubuntu 4.1.2-0ubuntu4)] on linux2
> Type "help", "copyright", "credits" or "license" for more information.
> >>> help(int)
> Help on class int in module __builtin__:
>
> class int(object)
> | int(x[, base]) -> integer
> |
> | Convert a string or number to an integer, if possible. A floating
> point
> | argument will be truncated towards zero (this does not include a string
> | representation of a floating point number!) When converting a
> string, use
> | the optional base. It is an error to supply a base when converting a
> | non-string. If the argument is outside the integer range a long object
> | will be returned instead.
>
> >>> a = int('100', 2)
> >>> a
> 4
> >>>
>
> HTH
While it's interesting to know we can go from binary to int, the OP
wanted the other way.
I think it will be a nice enhancement to add to % operator (like %x,
something for binary, %b or %t say) or something like a.__bin__ as
suggested by the OP.
FWIW, gdb has a /t format to print in binary.
(gdb) p 100
$28 = 100
(gdb) p /x 100
$29 = 0x64
(gdb) p /t 100
$30 = 1100100
(gdb)
--Karthik
--
http://mail.python.org/mailman/listinfo/python-list
Re: Newbie question regarding string.split()
On Apr 20, 11:51 am, kevinliu23 <[EMAIL PROTECTED]> wrote:
> Hey guys,
>
> So I have a question regarding the split() function in the string
> module. Let's say I have an string...
>
> input = "2b 3 4bx 5b 2c 4a 5a 6"
> projectOptions = (input.replace(" ", "")).split('2')
> print projectOptions
>
> ['', 'b34bx5b', 'c4a5a6']
>
The confusion, as you can see from other posts, is because the
behavior is different from default split().
Default split works on whitespace and we don't get leading/trailing
empty list items.
So just add input = input.strip('2') after the input assignment (BTW,
someone has pointed out that 'input' shadows a built-in identifier).
Note this solution will work
for splitting on any sequence of chars..just strip them first. Note we
still get empty elements in the middle of the string -- this probably
we want to get in most cases.
Karthik
> My question is, why is the first element of projectOptions an empty
> string? What can I do so that the first element is not an empty
> string? but the 'b34bx5b' string as I expected?
>
> Thanks so much guys. :)
--
http://mail.python.org/mailman/listinfo/python-list
Re: split a string of space separated substrings - elegant solution?
On Aug 1, 12:41 am, Helmut Jarausch <[EMAIL PROTECTED]> wrote: > Many thanks to all of you! > It's amazing how many elegant solutions there are in Python. Here is yet another solution. pexpect.split_command_line() >From the documentation: split_command_line(command_line) This splits a command line into a list of arguments. It splits arguments on spaces, but handles embedded quotes, doublequotes, and escaped characters. It's impossible to do this with a regular expression, so I wrote a little state machine to parse the command line. http://pexpect.sourceforge.net/pexpect.html But I am surprised to see there is a standard module already doing this (shlex) Karthik > > -- > Helmut Jarausch > > Lehrstuhl fuer Numerische Mathematik > RWTH - Aachen University > D 52056 Aachen, Germany -- http://mail.python.org/mailman/listinfo/python-list
Re: Chaining programs with pipe
On Aug 21, 3:09 pm, avishay <[EMAIL PROTECTED]> wrote:
> Hello
> I'm trying to chain two programs with a pipe (the output of one
> feeding the input of the other). I managed to capture the output and
> feeding the input of each program independently with popen, but how do
> I tie them together? Is there a solution that works equally on all
> platforms?
Not sure on non-unix platforms, but in unix like platforms it's best
to reuse shell's power.
>>> import commands
>>> commands.getoutput('ls | wc')
' 4 4 24'
>>>
Thanks,
Karthik
>
> Thanks,
> Avishay
--
http://mail.python.org/mailman/listinfo/python-list
Re: Chaining programs with pipe
On Aug 21, 8:33 pm, Grant Edwards <[EMAIL PROTECTED]> wrote:
> On 2007-08-22, Karthik Gurusamy <[EMAIL PROTECTED]> wrote:
>
> > Not sure on non-unix platforms, but in unix like platforms it's best
> > to reuse shell's power.
>
> >>>> import commands
> >>>> commands.getoutput('ls | wc')
> > ' 4 4 24'
>
> Executing a shell just because you want a pipe seems like a bit
> of overkill. Doing it the "right" way with subprocess is
> pretty trivial.
Probably I should've put extra stress on the word "reuse".
The example quoted was trivial; if you replace the pipeline to have
say 5 processes, the advantage of not-reinventing the wheel becomes
more obvious.
I would call hand-crafting the pipe-setup an "overkill" when a very
good solution already exists to solve the problem. Yes, it may be
trivial to do; but not simpler than delegating to a shell.
Karthik
>
> --
> Grant Edwards grante Yow! I was born in a
> at Hostess Cupcake factory
>visi.combefore the sexual
>revolution!
--
http://mail.python.org/mailman/listinfo/python-list
Re: how can I find out the process ids with a process name
On Sep 2, 12:26 pm, herman <[EMAIL PROTECTED]> wrote:
> Hi,
>
> I would like to find out all the process id with the process name
> 'emacs'.
>
> In the shell, i can do this:
>
> $ ps -ef |grep emacs
> root 20731  8690 0 12:37 pts/2  00:00:09 emacs-snapshot-gtk
> root 25649 25357 0 13:55 pts/9  00:00:05 emacs-snapshot-gtk rtp.c
> root 26319 23926 0 14:06 pts/7  00:00:04 emacs-snapshot-gtk
> stressTestVideo.py
> root 26985     1 0 14:15 ?      00:00:01 /usr/bin/emacs-snapshot-
> gtk
> root 27472 21066 0 14:23 pts/5  00:00:00 grep emacs
>
> and I can see the process id is 20731, 25649, etc, etc.
>
> But now I would like to do the programmically in my python script.
> I know I can use ' os.system(cmd)' to execute the command 'ps -ef |
> grep emacs', but how
> can I pipe the output of my 'ps -ef | grep emacs' to my python script
> and then run a regression expression with it to get the process Ids?
>
Try the commands module; it's simple to just get the output. The
subprocess module is a newer way of doing things, but
commands.getoutput() is a lot simpler for simple shell-like tasks.
>>> import commands
>>> commands.getoutput("ps -ef | grep emacs | awk '{print $2}'")
'21739\n15937\n15287\n5097\n14797\n31777\n8779\n2973\n5413\n13024\n13026'
>>>
Your script can then use the output as its input.
Karthik
> Thank you.
--
http://mail.python.org/mailman/listinfo/python-list
Re: concise code (beginner)
On Sep 5, 11:17 am, James Stroud <[EMAIL PROTECTED]> wrote: > bambam wrote: > > I have about 30 pages (10 * 3 pages each) of code like this > > (following). Can anyone suggest a more compact way to > > code the exception handling? If there is an exception, I need > > to continue the loop, and continue the list. > > > Steve. > > > --- > > for dev in devs > > try: > > dev.read1() > > except > > print exception > > remove dev from devs > > > for dev in devs > > try: > > dev.read2() > > [etc.] > > My keen sense of pattern recognition tells me that all of your read's > follow the same naming pattern--or was that just an accidental naming > coincidence on your part for the sake of example? > > for i in xrange(number_of_reads): >for dev in devs: > try: >_reader = getattr(dev, 'read%d' % i) >_reader() > except Exception, e: >print e >devs.remove(dev) I see in many of the solutions suggested above, the devs sequence/ iterator is being modified while iterating. I know it is not defined for interation over dictionary keys. Are they defined for other collections like lists? Karthik > > James > > -- > James Stroud > UCLA-DOE Institute for Genomics and Proteomics > Box 951570 > Los Angeles, CA 90095 > > http://www.jamesstroud.com/ -- http://mail.python.org/mailman/listinfo/python-list
Re: concise code (beginner)
On Sep 5, 1:37 pm, James Stroud <[EMAIL PROTECTED]> wrote: > Karthik Gurusamy wrote: > > On Sep 5, 11:17 am, James Stroud <[EMAIL PROTECTED]> wrote: > > >> for i in xrange(number_of_reads): > >>for dev in devs: > >> try: > >>_reader = getattr(dev, 'read%d' % i) > >>_reader() > >> except Exception, e: > >>print e > >>devs.remove(dev) > > > I see in many of the solutions suggested above, the devs sequence/ > > iterator is being modified while iterating. I know it is not defined > > for interation over dictionary keys. Are they defined for other > > collections like lists? > > Good eye! My code is broke as you have noticed: > > py> r = range(5) > py> for i in r: > ... print i > ... if i % 2: > ... r.remove(i) > ... > 0 > 1 > 3 > > For longer sequences, the algorithm I've used in these cases in the past > goes something like this: > > py> r = range(10, 17) > py> print r > [10, 11, 12, 13, 14, 15, 16] > py> > py> i = 0 > py> while i < len(r): > ... j = r[i] > ... print j > ... if j % 2: > ... r.remove(j) > ... else: > ... i += 1 > ... > 10 > 11 > 12 > 13 > 14 > 15 > 16 > py> print r > [10, 12, 14, 16] > > Which would change my problematic code above to: > > for i in xrange(number_of_reads): >j = 0 >while j < len(devs): > try: >_reader = getattr(devs[j], 'read%d' % i) >_reader() >j += 1 > except Exception, e: >print e >devs.remove(dev) > > Another way is to make a copy of devs, if devs is short, which makes my > problematic code into a matter of a "typo"--maybe I can make this claim > to save some face? > > for i in xrange(number_of_reads): >for dev in devs[:]: > try: >_reader = getattr(dev, 'read%d' % i) >_reader() > except Exception, e: >print e >devs.remove(dev) > Thanks, personally I like this duplicate copy solution. It's cleaner and easier on the eye/brain. (Moreover the C like first solution assumes the collection is a sequence, which is not true for dictionary/ set like collections). 
That said, it may be a good future language enhancement to define a reasonable consistent behavior for an iterator over a changing collection. This occurs quite common when we walk a collection and usually delete the current item. For a sequence, what the expected behavior is quite obvious (just remove this element and go over to the next). For other collections like dictionary/set, again if the operation is delete, the expected behavior is obvious. If we are doing insertion, for sequence a well- defined behavior can be formulated (based on insert before or after current position -- if after we will see it in the walk, if before we won't see it) . For dict/set I see this isn't simple (as based on hash key we may insert ahead or later of the current 'cursor'/position. Karthik > James > > -- > James Stroud > UCLA-DOE Institute for Genomics and Proteomics > Box 951570 > Los Angeles, CA 90095 > > http://www.jamesstroud.com/ -- http://mail.python.org/mailman/listinfo/python-list
Re: How to insert in a string @ a index
On Sep 8, 11:02 am, [EMAIL PROTECTED] wrote:
> Hi;
>
> I'm trying to insert XYZ before a keyword in a string. The first and
> the last occurence of hello in the string t1 (t1="hello world hello.
> hello \nwhy world hello") are keywords. So after the insertion of XYZ
> in this string, the result should be t1 = "XYZhello world hello. hello
> \nwhy world XYZhello"
>
> The python doesn't supports t1[keyword_index]="XYZhello" (string
> object assignment is not supported). How do I get to this problem? Any
> sugguestions?
Yet another solution using re
>>> t1 = 'hello world hello. hello. \nwhy world hello'
>>> import re
>>> l1 = re.split('hello', t1)
>>> l1[0] = 'XYZ' + l1[0]
>>> l1[-2] += 'XYZ'
>>> 'hello'.join(l1)
'XYZhello world hello. hello. \nwhy world XYZhello'
>>>
If there are fewer than two occurrences of 'hello', you'll get an
exception that needs special handling.
Karthik
>
> -a.m.
--
http://mail.python.org/mailman/listinfo/python-list
optparse -- anyway to find if the user entered an option?
Hi,
I see that I can provide a default value for an option. But I couldn't
find any way to tell whether the user really entered the option or the
option took that value because of the default. A simple check of the
value against the default may not always work, as the user might have
manually entered the same default value.
Let's assume I want to take in the ip-address using -i .
If user didn't give it explicitly, I am going to use socket interface
to figure out this host's IP address.
ip_addr_default = '100.100.100.100'
parser.add_option("-i", "--ip-address", dest="ip",
default=ip_addr_default,
metavar="IP-ADDRESS", help="IP address. default:" +
ip_addr_default + "e.g. --i=1.1.1.1"
)
(options, args) = parser.parse_args()
Now if options.ip == ip_addr_default, I still can't be 100% sure that
the user did not type -i 100.100.100.100.
Any way to figure out from options that the user typed it or not?
(The reason I want to know this is if user did not mention -i, I can
compute IP later
using socket module)
I could think of a hack of using None as default and since no user can
ever
enter a None value, I can be sure that the user didn't provide -i.
I'm wondering if there is a cleaner approach -- something like
parser.opt_seen("-i")
Thanks,
Karthik
--
http://mail.python.org/mailman/listinfo/python-list
Re: optparse -- anyway to find if the user entered an option?
On Apr 14, 7:54 pm, Steven D'Aprano
<[EMAIL PROTECTED]> wrote:
> On Sat, 14 Apr 2007 16:49:22 -0700, Karthik Gurusamy wrote:
> > I'm wondering if there is a cleaner approach -- something like
> > parser.opt_seen("-i")
>
> What do dir(parser) and help(parser) say?
They don't seem to convey existence of a routine like the
one I'm looking for. I did check the lib reference - I guess
such a support is not available. Most likely the 'None'
solution will work for me. I will go ahead with it.
Karthik
>>> dir(parser)
['__doc__', '__init__', '__module__', '_add_help_option',
'_add_version_option', '_check_conflict', '_create_option_list',
'_create_option_mappings', '_get_all_options', '_get_args',
'_get_encoding', '_init_parsing_state', '_long_opt',
'_match_long_opt', '_populate_option_list', '_process_args',
'_process_long_opt', '_process_short_opts', '_share_option_mappings',
'_short_opt', 'add_option', 'add_option_group', 'add_options',
'allow_interspersed_args', 'check_values', 'conflict_handler',
'defaults', 'description', 'destroy', 'disable_interspersed_args',
'enable_interspersed_args', 'epilog', 'error', 'exit',
'expand_prog_name', 'format_description', 'format_epilog',
'format_help', 'format_option_help', 'formatter',
'get_default_values', 'get_description', 'get_option',
'get_option_group', 'get_prog_name', 'get_usage', 'get_version',
'has_option', 'largs', 'option_class', 'option_groups', 'option_list',
'parse_args', 'print_help', 'print_usage', 'print_version',
'process_default_values', 'prog', 'rargs', 'remove_option',
'set_conflict_handler', 'set_default', 'set_defaults',
'set_description', 'set_process_default_values', 'set_usage',
'standard_option_list', 'usage', 'values', 'version']
>>>
>>> print sys.version
2.5 (r25:51908, Sep 29 2006, 12:35:59)
[GCC 3.2.3 20030502 (Red Hat Linux 3.2.3-54)]
help(parser) just gives info on a generic instance.
>
> --
> Steven.
--
http://mail.python.org/mailman/listinfo/python-list
Re: Compare regular expressions
On Apr 16, 2:50 am, Thomas Dybdahl Ahle <[EMAIL PROTECTED]> wrote: > Hi, I'm writing a program with a large data stream to which modules can > connect using regular expressions. > > Now I'd like to not have to test all expressions every time I get a line, > as most of the time, one of them having a match means none of the others > can have so. > > But ofcource there are also cases where a regular expression can > "contain" another expression, like in: > "^strange line (\w+) and (\w+)$" and "^strange line (\w+) (?:.*?)$" in > which case I'd like to first test the seccond and only if it mathces test > the seccond. > > Do anybody know if such a test is possible? > if exp0.contains(exp1): ... What you want is finding if R2 is a superset of R1 for two given regular languages R1 and R2. I know of some methods for finding intersection of two regular languages; and I think the time/space complexity is big. So the simple answer is it is not feasible to provide such support for two generic r.e.s without a large time/space usage. You may consult any of the math/theory groups for more insights. If you know already R2 >= R1 (that is you precompute and remember), then it's a trivial to skip checking for R1 if R2 turned up negative. You can even arrange all the Rs in a binary tree like fashion and skip checking a whole subtree if the sub-tree's root node gave negative for r.e. match. Karthik -- http://mail.python.org/mailman/listinfo/python-list
Re: How can I know how much to read from a subprocess
On Sep 17, 4:14 pm, [EMAIL PROTECTED] wrote: > Hello, > > I want to write a terminal program in pygtk. It will run a subprocess, > display everything it writes in its standard output and standard > error, and let the user write text into its standard input. > > The question is, how can I know if the process wrote something to its > output, and how much it wrote? I can't just call read(), since it will > block my process. The solution is not simple. You may try a separate thread to do the reading (so that you can afford to block that single thread). You can also run into other I/O deadlocks with a simple solution (ie if you try to feed the whole stdin before reading any stdout, your stdin feeding can block because the process's stdout is full -- it's a bit confusing but for large stdin/stdout, a deadlock is very much possible). Did you try subprocess.Popen.communicate() ? Under the covers it should take care of th ese deadlock issues. It may read and buffer the whole stdout/stderr, so don't use it if the process generates infinite/ very-large stdout/stderr. If the process you launch needs two way communication, like an ssh/ftp session (where the stdin depends on process's prior stdout), the only viable solution is to simulate a human (e.g. pexpect module) Karthik > > Thanks, > Noam -- http://mail.python.org/mailman/listinfo/python-list
Re: Removing objects in a list via a wild card
On Sep 19, 1:11 pm, David <[EMAIL PROTECTED]> wrote:
> On 9/19/07, James Matthews <[EMAIL PROTECTED]> wrote:
>
> > Hi List
>
> > I have a list of files from my current directory:
>
> > import os
>
> > files = os.listdir(os.getcwd())
>
> > Now this list also includes some files that i don't want like my python
> > files... How would i remove them
>
> You can use regular expressions:
>
> import re
> files=[file for file in os.listdir(os.getcwd()) if not
> re.match('^.+\.((py)|(pyc))$', file)]
>
> You can also use fnmatch:
>
> from fnmatch import fnmatch
> files = [file for file in os.listdir(os.getcwd()) if not fnmatch(file,
> '*.py') and not fnmatch(file, '*.pyc')]
Another option is to use glob.
import glob
p1 = glob.glob('*.py')
p2 = glob.glob('*.pyc')
all = glob.glob('*') # won't include '.', '..'
non_py = set(all) - set(p1) - set(p2)
Karthik
--
http://mail.python.org/mailman/listinfo/python-list
Re: Sets in Python
On Sep 19, 6:16 am, Sion Arrowsmith <[EMAIL PROTECTED]>
wrote:
> sapsi <[EMAIL PROTECTED]> wrote:
> > Why can't lists be hashed?
>
> Several people have answered "because they're mutable" without
> explaining why mutability precludes hashing. So:
>
> Consider a dict (dicts have been in Python a *lot* longer than
> sets, and have the same restriction) which allowed lists as
> keys:
>
> d = {}
> k = [1, 2]
> d[k] = None
>
> Now, if I were to do:
>
> k.append(3)
>
> what would you expect:
>
> d.keys()
>
> to return? Did d magically rehash k when it was modified? Did d[k]
> take a copy of k, and if so, how deep was the copy (consider
> d[[1, k]] = None followed by a modification to k)? Leaving the hash
> unchanged and relying on collision detection to resolve won't work,
> since you may go directly for d[[1, 2, 3]] and not spot that
> there's already an entry for it since it's been hashed under [1, 2].
>
> "Practicality beats purity" and the design decision was to simply
> sidestep these issues by disallowing mutable dict keys. And as the
> set implementation is based on the dict implementation, it applies
> to sets to.
While it's easy to explain the behavior, I think the decision to dis-
allow mutable items as keys is a bit arbitrary. There is no need for
dict to recompute hash (first of all, a user doesn't even need to know
if underneath 'hashing' is used -- the service is just a mapping
between one item to another item).
Since we know hashing is used, all that is needed is, a well-defined
way to construct a hash out of a mutable. "Given a sequence, how to
get a hash" is the problem. If later the given sequence is different,
that's not the dict's problem.
>>> d = {}
a = 10
>>> d[a] = 'foo'
>>> d[5+5] = 'bar'
>>> d[10]
'bar'
aren't the '5+5' which is 10, is different from the previous line's
a?.. so
why not allow similar behavior with lists/other sequence/even other
collections. As long as two objects compare equal the hash-result must
be the same. I guess this takes us to defining the equality operation
for lists-- which I think has a very obvious definition (ie same
length and the ith element of each list compare equal).
So if the list changes, it will result in a different hash and we will
get a hash-miss. I doubt this is in anyway less intuitive than dis-
allowing mutable items as keys.
Karthik
>
> --
> \S -- [EMAIL PROTECTED] --http://www.chaos.org.uk/~sion/
>"Frankly I have no feelings towards penguins one way or the other"
> -- Arthur C. Clarke
>her nu becomeþ se bera eadward ofdun hlæddre heafdes bæce bump bump bump
--
http://mail.python.org/mailman/listinfo/python-list
Re: Sets in Python
On Sep 19, 3:06 pm, Paddy <[EMAIL PROTECTED]> wrote: > On Sep 19, 9:58 pm, Karthik Gurusamy <[EMAIL PROTECTED]> wrote: > > > Since we know hashing is used, all that is needed is, a well-defined > > way to construct a hash out of a mutable. "Given a sequence, how to > > get a hash" is the problem. If later the given sequence is different, > > that's not the dict's problem. > > Oh it is possible to construct a hash from a mutable. What is > difficult is creating the same hash when the mutable mutates. Why? There is no reason that the dict should maintain the same hash, after all the user is calling with a different sequence as key (after the mutation). There seems to be an underlying assumption that the dictionary key- >value mapping should somehow maintain the mapping even when the key changes behind its back. The contract could very well be, hey if you give me a different sequence later (by mutating the one you added), don't expect me to find it in the dictionary. >Or > indeed working out what it means when a hash key mutates and you > access the dictionary. > Ignoring this gives the programmer a big problem hence the limitation. > > I don't think you have a better solution. But why would a programmer expect to find the match, when his/her code has changed the sequence (or has somehow let the hash key mutate) from the time of dictionary addition. If I did, a = [10, 20] and I did d[a]= 'foo', then a.append(30). If dict complains key error on d[a] now, I won't be surprised. If I do d[[10, 20, 30]], I will be surprised if it doesn't find the item. Of course, in today's behavior the above is syntax error. Karthik > > - Paddy. -- http://mail.python.org/mailman/listinfo/python-list
Re: Sets in Python
On Sep 19, 7:17 pm, Steven D'Aprano <[EMAIL PROTECTED]
cybersource.com.au> wrote:
> On Wed, 19 Sep 2007 20:58:03 +, Karthik Gurusamy wrote:
> > While it's easy to explain the behavior, I think the decision to dis-
> > allow mutable items as keys is a bit arbitrary. There is no need for
> > dict to recompute hash
>
> What???
>
> Of course it does. How else can it look up the key? Because it (somehow)
> just recognizes that it has seen the key before? How? By magic?
You answered it yourself later. For a mapping service, hash is just
one way to do things. What you need is for each item in the
collection, a unique key.
How you go from the key to the value is not something a programmer
needs to know.
Your mind is set on thinking on hash alone and hence you don't see
beyond it.
>
> > (first of all, a user doesn't even need to know
> > if underneath 'hashing' is used -- the service is just a mapping between
> > one item to another item).
>
> The user doesn't need to know the mechanism, but the dict does. Dicts are
> implemented as hash tables. I suppose they could be implemented as
> something else (what? linear lists? some sort of tree?) but the same
> considerations must be made:
Oh yes. If the keys are all integers (or any set of items that can be
ordered), why not an AVL tree. It has guaranteed O(log N) while a hash
in the worst case is O(N). Why would you want to tie yourself to the
drawbacks of one data structure? Understand your goal is not to provide
a hash, but to provide a mapping service.
the dict must be able to find keys it has
> seen before. How is the dict supposed to recognise the key if the key has
> changed?
>
> > Since we know hashing is used, all that is needed is, a well-defined way
> > to construct a hash out of a mutable. "Given a sequence, how to get a
> > hash" is the problem.
>
> Nonsense. That's not the problem. The problem is how to get exactly the
> same hash when the sequence has changed.
Yes, if you keep thinking hash is the only tool you got.
>
> In other words, if you have two mutable objects M1 and M2, then you
> expect:
>
No. I don't expect that. I expect the hash to be different. Why do you
keep thinking it's the mapping's responsibility to take care of a
changing key?
> hash(M1) == hash(M2) if and only if M1 and M2 are equal
> hash(M1) != hash(M2) if M1 and M2 are unequal
>
> but also:
>
> if M1 mutates to become equal to M2, hash(M1) must remain the same while
> still being different from hash(M2).
>
> That means that hash() now is a non-deterministic function. hash([1,2,3])
> will vary according to how the list [1,2,3] was constructed!
>
> Obviously something has to give, because not all of these things are
> mutually compatible.
>
> > If later the given sequence is different, that's
> > not the dict's problem.
>
> Data structures don't have problems. Programmers do. And language
> designers with sense build languages that minimize the programmers
> problems, not maximize them.
Yes, here you talk about a different goal altogether. Here comes the
'arbitrary' part I mentioned.
>
> > So if the list changes, it will result in a different hash and we will
> > get a hash-miss. I doubt this is in anyway less intuitive than dis-
> > allowing mutable items as keys.
>
> The choices for the language designer are:
>
> (1) Invent some sort of magical non-deterministic hash function which
> always does the Right Thing.
Nope, just say if the new sequence is different, you don't find the
item in the dict.
>
> (2) Allow the hash of mutable objects to change, which means you can use
> mutable objects as keys in dicts but if you change them, you can no
> longer find them in the dict. They'll still be there, using up memory,
> but you can't get to them.
In the new model, at the time of addition, you need to remember the
key at that time. If it's a list, you make a copy of the items.
>
> (3) Simply disallow mutable objects as keys.
>
> Alternative 1 is impossible, as we've seen, because the requirements for
> the Right Thing are not mutually compatible.
>
> Alternative (2) leads to hard-to-find, hard-to-diagnose bugs where you
> store objects in a dict only for them to mysteriously disappear later.
> Worse, it could lead to bugs like the following hypothetical:
Of course they can be reached with.. for k in dict...
>
> >>> M = [1, 2, 3]
> >>> D = {M: 'parrot'} # pretend this works
> >>> D
>
> {[1, 2, 3]: 'parrot'}>>> M.append(4)
> >>> D
>
> {[1, 2, 3, 4]: 'parrot'}>>> D[[1, 2, 3, 4]]
No, in the new way,
Re: subprocess -popen - reading stdout from child - hangs
On Sep 22, 8:28 pm, "[EMAIL PROTECTED]" <[EMAIL PROTECTED]>
wrote:
> Let's say I have this Python file called loop.py:
>
> import sys
> print 'hi'
> sys.stdout.flush()
Add sys.stdout.close()
> while 1:
> pass
>
> And I want to call it from another Python process and read the value
> 'hi'. How would I do it?
>
> So far I have tried this:
>
> >>> proc = subprocess.Popen('python
> >>> /home/chiefinnovator/loop.py',shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE)
> >>> proc.stdout.read()
>
> But it just hangs at read()
>
> proc.communicate() also just hangs. What am I doing wrong? Please
> advise.
Since your loop.py is still alive and hasn't closed its stdout, the
caller continues to wait for EOF (it doesn't know if loop.py is done
generating all its output)
Karthik
>
> Thanks,
>
> Greg
--
http://mail.python.org/mailman/listinfo/python-list
Re: shutil.copy2 error
On Sep 24, 7:34 am, Horse <[EMAIL PROTECTED]> wrote:
> I've written a python script that copies a nightly Oracle backup file
> to another server. Every couple days, the script fails with this
> error message:
>
> Error copying Q:/Oradata/GISPROD/Backups/3UISN35R_1_1 to s:/gisprod/
> backups/3UISN35R_1_1
> [Errno 22] Invalid argument
>
> Here's the code for the function I'm running. The path names are all
> correct, and it works *most of the time*. It only fails about once or
> twice a week. Anyone know where I can get more info on this "errno 22
> invalid argument"?
>
> def CopyNewFiles(SOURCE_DIR, DEST_DIR):
> global STATUS
> try:
> os.chdir(SOURCE_DIR)
> for x in os.listdir(SOURCE_DIR):
> int_time = os.stat(x)[stat.ST_CTIME]
> str_time = time.ctime(int_time)
> if datetime.date.fromtimestamp(int_time + 0.00) >
> YESTERDAY:
> try:
> DEST_FILE = os.path.join(DEST_DIR, x)
> logfile.write(" Copying " + SOURCE_DIR + x + "
> to " + DEST_FILE + "\n")
> logfile.flush()
> if not os.path.isfile(DEST_FILE):
> shutil.copy2(x, DEST_FILE)
I'm not sure of the error; but one possibility is that the source-
directory contents may be modified by some other process, while you
are looping here copying one-by-one the files.
So a file 'x' could be present at the time you enter loop, but gone by
the time you try the shutil.copy2. Again this is just a guess...
Yet another possibility is 'x' is not a regular file (say in unix it
could be a named pipe).. you can try adding a check for x (like
os.path.isfile(x)) and copy only regular files.
Karthik
> else:
> logfile.write("File exists. Skipping.
> \n")
> logfile.flush()
> except (IOError, os.error), why:
> logfile.write("\n\nError copying " + SOURCE_DIR +
> x + " to " + DEST_FILE + "\n\n")
> logfile.write("\n" + str(why) + "\n")
> logfile.flush()
> STATUS = "FAILED"
> except:
> logfile.write("\n\nUnhandled error in CopyNewFiles\n\n")
> logfile.write("SOURCE_DIR = " + SOURCE_DIR + "\n")
> logfile.write("DEST_DIR = " + DEST_DIR + "\n")
> logfile.flush()
> STATUS = "FAILED"
--
http://mail.python.org/mailman/listinfo/python-list
Re: subprocess -popen - reading stdout from child - hangs
On Sep 24, 2:22 pm, "[EMAIL PROTECTED]" <[EMAIL PROTECTED]> wrote: > On Sep 23, 2:58 am, Karthik Gurusamy <[EMAIL PROTECTED]> wrote: > > > On Sep 22, 8:28 pm, "[EMAIL PROTECTED]" <[EMAIL PROTECTED]> > > wrote: > > > > Let's say I have this Python file called loop.py: > > > > import sys > > > print 'hi' > > > sys.stdout.flush() > > > Add sys.stdout.close() > > Adding sys.stdout.close() and removing sys.stdout.flush() seems to > make it work. But can the while loop still use sys.stdout later on? > Do I have to reopen it? Once you close a file-object, you cannot use it. You'll get exception if you try. I quickly tried the fdopen on id 1 (stdout in unix like OS) and it seems to work >>> import sys >>> sys.stdout.close() >>> import os >>> print 'hi' Traceback (most recent call last): File "", line 1, in ValueError: I/O operation on closed file >>> sys.stdout=os.fdopen(1, 'w') >>> print 'hi' hi >>> BUT you may want to re-think your design. Note that your caller process will anyway stop reading further from your loop.py process the moment it sees the "first" EOF. So even if you enable loop.py to generate more output (thru' the above fdopen), the caller process is not going to see the newly generated data. If your requirement is to collect all output from loop.py then you can't do it if loop.py has an infinite loop across its data generation points (ie it can generate data after the infinite loop -- which anyway doesn't sense). If all you want is not to get blocked, try one of the select solutions or read a small amount at a time (which you can guarantee to be available). Yet another solution would be you could set up an alarm and get out of the blocking read if the alarm fires. Karthik > > Thanks, > > Greg -- http://mail.python.org/mailman/listinfo/python-list
Re: Confused about 'positive lookbehind assertion'
On Sep 25, 8:01 am, Erik Jones <[EMAIL PROTECTED]> wrote:
> On Sep 24, 2007, at 9:38 PM, Robert Dailey wrote:
>
> > Hi,
>
> > I've been reading the python documentation on 'positive lookbehind
> > assertion' and I don't understand at all how it works. The python
> > docs give the following example:
>
> > " (?<=abc)def will find a match in "abcdef", since the lookbehind
> > will back up 3 characters and check if the contained pattern matches."
>
> > Can anyone emphasize more on what this RE operation does? Thanks.
>
> Have you actually tried it out?
>
> >>> import re
> >>> r = re.compile(r'(?<=abc)def')
> >>> m1 = r.search('bcde')
> >>> m1.group()'def'
> 'def'
> >>> m2 = r.search('bcdefff')
> >>> m2 == None
> True
>
> So, it matches 'def' but only if it is immediately preceded by 'abc'.
Any idea what this positive lookbehind achieves that can't be done
without it?
I remember cases where positive look-ahead is useful.
In the above example, r.search('abcdef') does the job of ensuring
'def' is preceded by 'abc'.
Karthik
>
> Erik Jones
>
> Software Developer | Emma®
> [EMAIL PROTECTED]
> 800.595.4401 or 615.292.5888
> 615.292.0777 (fax)
>
> Emma helps organizations everywhere communicate & market in style.
> Visit us online athttp://www.myemma.com
--
http://mail.python.org/mailman/listinfo/python-list
Re: A question about subprocess
On Oct 3, 9:46 am, JD <[EMAIL PROTECTED]> wrote: > Hi, > > I want send my jobs over a whole bunch of machines (using ssh). The > jobs will need to be run in the following pattern: > > (Machine A) (Machine B) (Machine C) > > Job A1 Job B1Job C1 > > Job A2 Job B2etc > > Job A3 etc > > etc > > Jobs runing on machine A, B, C should be in parallel, however, for > each machine, jobs should run one after another. > > How can I do it with the subprocess? subprocess is not network aware. What you can do is write a simple python script say run_jobs.py which can take in a command-line argument (say A or B or C) and will fire a sequence of subprocesses to execute a series of jobs. This will ensure the serialization condition like A2 starting after A1's completion. Now you can write a load distributer kind of script which uses ssh to login to the various machines and run run_jobs.py with appropriate argument (Here I assume all machines have access to run_jobs.py -- say it may reside on a shared mounted file-system). e.g. in outer script: ssh machine-A run_jobs.py A ssh machine-B run_jobs.py B ssh machine-C run_jobs.py C ... You may want to fire all these at once so that they all execute in parallel. Karthik > > Thanks, > > JD -- http://mail.python.org/mailman/listinfo/python-list
Re: matching a street address with regular expressions
On Oct 10, 10:02 am, "Shawn Milochik" <[EMAIL PROTECTED]> wrote:
> On 10/4/07, Ricardo Aráoz <[EMAIL PROTECTED]> wrote:
>
>
>
> > Christopher Spears wrote:
> > > One of the exercises in Core Python Programming is to
> > > create a regular expression that will match a street
> > > address. Here is one of my attempts.
>
> > >>>> street = "1180 Bordeaux Drive"
> > >>>> patt = "\d+ \w+"
> > >>>> import re
> > >>>> m = re.match(patt, street)
> > >>>> if m is not None: m.group()
> > > ...
> > > '1180 Bordeaux'
>
> > > Obviously, I can just create a pattern "\d+ \w+ \w+".
> > > However, the pattern would be useless if I had a
> > > street name like 3120 De la Cruz Boulevard. Any
> > > hints?
>
> Also, that pattern can be easily modified to have any number of words
> at the end:
> patt = "\d+ (\w+){1,}"
> This would take care of 3120 De la Cruz Boulevard.
\w doesn't take care of white-space. Following will work.
patt = r"\d+ (\w+\s*){1,}"
BTW {1,} is same as +. So
patt = r"\d+ (\w+\s*)+"
will work as well.
Note that using raw-string for re pattern is safer in most uses.
Karthik
--
http://mail.python.org/mailman/listinfo/python-list
Re: negative base raised to fractional exponent
On Oct 16, 2:48 pm, [EMAIL PROTECTED] wrote: > Does anyone know of an approximation to raising a negative base to a > fractional exponent? For example, (-3)^-4.1 since this cannot be > computed without using imaginary numbers. Any help is appreciated. Use complex numbers. They are part of python (no special modules needed). Just write your real number r, as r+0j e.g. square-root of -4 is 2j >>> (-4+0j)**(0.5) (1.2246063538223773e-16+2j) # real part is almost zero >>> >>> (-4.234324+0j)**(0.5) (1.2599652164116278e-16+2.0577473119894969j) >>> 2.0577473119894969j ** 2 (-4.234324+0j) >>> Karthik -- http://mail.python.org/mailman/listinfo/python-list
Re: Easiest way to get exit code from os.popen()?
On Oct 24, 12:07 pm, mrstephengross <[EMAIL PROTECTED]>
wrote:
> Hi folks. I'm using os.popen() to run a command; according to the
> documentation, the filehandle.close() oepration is suppsoed to return
> the exit code. However, when I execute something like "exit 5",
> close() returns 1280. Here's the code:
>
> pipe = os.popen("exit 5")
> print pipe.close() # prints 1280
>
> Am I doing something wrong? Is there an easier way to get the exit
> code?
>>> print "%#x" % 1280
0x500 # high byte is your exit code; low byte gives signal info if
any
>>>
In any case, the best approach is to use the os module to interpret it
as given in other post (os.WEXITSTATUS(1280))
Karthik
>
> Thanks,
> --Steve
--
http://mail.python.org/mailman/listinfo/python-list
Re: simple question on dictionary usage
On Oct 26, 9:29 pm, Frank Stutzman <[EMAIL PROTECTED]> wrote:
> My apologies in advance, I'm new to python
>
> Say, I have a dictionary that looks like this:
>
> record={'BAT': '14.4', 'USD': '24', 'DIF': '45', 'OAT': '16',
> 'FF': '3.9', 'C3': '343', 'E4': '1157', 'C1': '339',
> 'E6': '1182', 'RPM': '996', 'C6': '311', 'C5': '300',
> 'C4': '349', 'CLD': '0', 'E5': '1148', 'C2': '329',
> 'MAP': '15', 'OIL': '167', 'HP': '19', 'E1': '1137',
> 'MARK': '', 'E3': '1163', 'TIME': '15:43:54',
> 'E2': '1169'}
>
> From this dictionary I would like to create another dictionary calld
> 'egt') that has all of the keys that start with the letter 'E'. In
> otherwords it should look like this:
>
> egt = {'E6': '1182','E1': '1137','E4': '1157','E5': '1148',
>'E2': '1169','E3': '1163'}
>
> This should be pretty easy, but somehow with all my googling I've
> not found a hint.
One possible solution (read list-comprehension if you not familiar
with it):
>>> record={'BAT': '14.4', 'USD': '24', 'DIF': '45', 'OAT': '16',
... 'FF': '3.9', 'C3': '343', 'E4': '1157', 'C1': '339',
... 'E6': '1182', 'RPM': '996', 'C6': '311', 'C5': '300',
... 'C4': '349', 'CLD': '0', 'E5': '1148', 'C2': '329',
... 'MAP': '15', 'OIL': '167', 'HP': '19', 'E1': '1137',
... 'MARK': '', 'E3': '1163', 'TIME': '15:43:54',
... 'E2': '1169'}
>>> egt = dict([(k, record[k]) for k in record if k.startswith('E')])
>>> egt
{'E5': '1148', 'E4': '1157', 'E6': '1182', 'E1': '1137', 'E3': '1163',
'E2': '1169'}
Karthik
>
> Thanks in advance
>
> --
> Frank Stutzman
--
http://mail.python.org/mailman/listinfo/python-list
Re: Running long script in the background
On Feb 6, 5:26 am, "[EMAIL PROTECTED]" <[EMAIL PROTECTED]> wrote: > Hello, > > I am trying to write a python cgi that calls a script over ssh, the > problem is the script takes a very long time to execute so Apache > makes the CGI time out and I never see any output. The script is set > to print a progress report to stdout every 3 seconds but I never see > any output until the child process is killed. > > Here's what I have in my python script: > > command = "ssh -l root %s /scripts/xen/xen-create-win-vps1.sh %s" % > (host, domuname) > output = os.popen(command) Apart from other buffering issues, it could be very well that ssh returns all the output in one single big chunk. Try running the ssh command (with the trailing 'command') from your shell and see if it generates output immediately. There may be some option to make ssh not buffer the data it reads from the remove command execution. If there is no such option, most likely you are out of luck. In this case, even if you making your remote script unbufferred, ssh may be buffering it. If both the machines have any shared filesystem, you can do a trick. Make your script write it's output unbuffered to a file. Since the file is mounted and available on both the machines.. start reading the file from this main python script (note that you may need a thread to do it, as your script will anyway be stuck waiting for the ssh to complete). Karthik > for line in output: >print line.strip() > > Here's a copy of the bash script. > > http://watters.ws/script.txt > > I also tried using os.spawnv to run ssh in the background and nothing > happens. > > Does anybody know a way to make output show in real time? -- http://mail.python.org/mailman/listinfo/python-list
auto-increment operator - why no syntax error?
I see python doesn't have ++ or -- operators unlike say, C. I read some reasonings talking about immutable scalars and using ++/-- doesn't make much sense in python (not sure if ++i is that far-fetched compared to the allowed i += 1) In any case, I accidentally wrote ++n in python and it silently accepted the expression and it took me a while to debug the problem. Why are the following accepted even without a warning about syntax error? (I would expect the python grammar should catch these kind of syntax errors) >>> n = 1 >>> 2 * + n 2 >>> n += 1 >>> n 2 >>> ++n 2 Karthik -- http://mail.python.org/mailman/listinfo/python-list
Re: Pexpect and a Linux Terminal
On Dec 24, 6:06 pm, "[EMAIL PROTECTED]" <[EMAIL PROTECTED]> wrote:
> hello,
>
> I'm new in Python and i would like to use Pexpect to execute a root
> command (i want to mount via a Pyhton script a drive)
>
> so that's my script for the moment :
>
> from os import *
> import pexpect
> import os
> cmd1="su -"
> cmd2="mount -o loop /home/user/my.iso /mnt/disk"
> pwd="mypassword"
>
> child = pexpect.spawn(cmd1)
> child.sendline('Mot de passe :')
Make that child.expect('Mot de passe :')
> child.sendline(pwd+"\r\n")
With sendline no need for the trailing "\r\n". Just do
child.sendline(pwd)
Here you may want to do something like
prompt = '.*#' # assumes your shell prompt for root ends in #
child.expect(prompt)
> child.sendline(cmd2)
Again add child.expect(prompt) so that you wait the completion of cmd2
and then child.close()
Karthik
>
> (is a French Terminal so 'Mot de passe' means Password :'
>
> After that i try to execute it, and nothing happened, i know how to
> lunch py file via python but i supposed the script don't detect the
> prompt Password.
>
> if anyone can help me please :)
>
> Have a nice day !
--
http://mail.python.org/mailman/listinfo/python-list
Re: Enumerating Regular Expressions
[EMAIL PROTECTED] wrote: > James Stroud wrote: > > You see the difficulty don't you? How will the computer know in advance > > that the regex matches only a finite set of possible strings? > > Well sure it might be a little difficult to figure _that_ out, although > probably not all that hard if you converted to an FSA or something. I > imagine detecting looping would be as simple as applying the right > graph algorithm. That's right. For finite regular language, you don't allow cycles. That means the graph must be a DAG (directed acyclic graph. If not directed it must be a tree, simple to check as node-count is edge-count plus one). Once you have a DAG, the problem becomes enumerating all paths from root node to any final node. This is a pretty straighforward problem in graph theory. I think you can simply find all from root-node to any other node and discard any of the path ending in a non-final state node. Karthik > > But that's not the point, you don't strictly need to know in advance > whether the regex represents a finite or infinite language. I just > want to enumerate it, if it's going to take longer than the life of the > universe and a stack size to match to do it, then so be it. It's > surely up to the user to be careful about how they form their > expressions. One way to get around it would be to disallow the * > operator in enumerations. > > Cheers, > -Blair -- http://mail.python.org/mailman/listinfo/python-list
Re: Memory leak in Python
[EMAIL PROTECTED] wrote:
> The amount of data I read in is actually small.
>
> If you see my algorithm above it deals with 2000 nodes and each node
> has ot of attributes.
>
> When I close the program my computer becomes stable and performs as
> usual. I check the performance in Performance monitor and using "top"
> and the total memory is being used and on top of that around half a gig
> swap memory is also being used.
>
> Please give some helpful pointers to overcome such memory errors.
>
> I revisited my code to find nothing so obvious which would let this
> leak happen. How to kill cross references in the program. I am kinda
> newbie and not completely aware of the finetuning such programming
> process.
>
I suspect you are trying to store each node's attributes in every other
node.
Basically you have a O(N^2) algorithm (in space and probably more in
time).
For N=2000, N^2 is pretty big and you see memory issues.
Try not to store O(N^2) information and see if you can just scale
memory requirements linearly in N. That is, see if you can store
attributes of a node at only one place per node.
I'm just guessing at your implementation; but from what you say
(peer-to-peer), I suspect there is an O(N^2) requirement. Also try
experimenting with small N (100 nodes say).
Thanks,
Karthik
> Thanks
>
>
> bruno at modulix wrote:
> > [EMAIL PROTECTED] wrote:
> > > I have a python code which is running on a huge data set. After
> > > starting the program the computer becomes unstable and gets very
> > > diffucult to even open konsole to kill that process. What I am assuming
> > > is that I am running out of memory.
> > >
> > > What should I do to make sure that my code runs fine without becoming
> > > unstable. How should I address the memory leak problem if any ? I have
> > > a gig of RAM.
> > >
> > > Every help is appreciated.
> >
> > Just a hint : if you're trying to load your whole "huge data set" in
> > memory, you're in for trouble whatever the language - for an example,
> > doing a 'buf = openedFile.read()' on a 100 gig file may not be a good
> > idea...
> >
> >
> >
> > --
> > bruno desthuilliers
> > python -c "print '@'.join(['.'.join([w[::-1] for w in p.split('.')]) for
> > p in '[EMAIL PROTECTED]'.split('@')])"
--
http://mail.python.org/mailman/listinfo/python-list
Re: Understanding tempfile.TemporaryFile
On Dec 27, 7:36 pm, Steven D'Aprano <[EMAIL PROTECTED]> wrote: > On Thu, 27 Dec 2007 21:17:01 -0600, Shane Geiger wrote: > > import tempfile > > tmp = tempfile.mktemp() > > > import os > > os.remove(tmp) > > Not only does that not answer the Original Poster's question, but I don't > think it does what you seem to think it does. > > >>> tmp = tempfile.mktemp() > >>> tmp > '/tmp/tmpZkS0Gj' > >>> type(tmp) > > >>> import os > >>> os.remove(tmp) > > Traceback (most recent call last): > File "", line 1, in > OSError: [Errno 2] No such file or directory: '/tmp/tmpZkS0Gj' > > You might like to read help(tempfile.mktemp). > > (By the way... the whole point of using tempfile is to avoid needing to > delete the file by hand afterwards.) FWIW tempfile.mkstemp needs explicit user deletion. And tempfile.mkstemp is recommended over tempfile.mktemp due to security reasons. Help on function mkstemp in module tempfile: mkstemp(suffix='', prefix='tmp', dir=None, text=False) mkstemp([suffix, [prefix, [dir, [text) User-callable function to create and return a unique temporary file. The return value is a pair (fd, name) where fd is the file descriptor returned by os.open, and name is the filename. If 'suffix' is specified, the file name will end with that suffix, otherwise there will be no suffix. If 'prefix' is specified, the file name will begin with that prefix, otherwise a default prefix is used. If 'dir' is specified, the file will be created in that directory, otherwise a default directory is used. If 'text' is specified and true, the file is opened in text mode. Else (the default) the file is opened in binary mode. On some operating systems, this makes no difference. The file is readable and writable only by the creating user ID. If the operating system uses permission bits to indicate whether a file is executable, the file is executable by no one. The file descriptor is not inherited by children of this process. Caller is responsible for deleting the file when done with it. 
<--- >>> Karthik > > -- > Steven -- http://mail.python.org/mailman/listinfo/python-list
Re: pexpect ssh login and ls | grep
On Dec 31 2007, 6:46 pm, crybaby <[EMAIL PROTECTED]> wrote:
> 1) what are these characters:
> \x1b]0;
> ~\x07\x1b[?1034h
>
> in line '\x1b]0;[EMAIL PROTECTED]:[EMAIL PROTECTED] ~]'?
These are probably escape sequences in your shell prompt string.
Typically they are interpreted by the terminal, like xterm, to update
title bar.
>
> 2) Also, how come I don't get 0 or 2(excuting ls command exit code)
> from result.split('\r\n')[0] or result.split('\r\n')[1] ?
I don't think your expect worked fine to capture the desired output.
>
> This is what I get:>>> import pexpect
> >>> child=pexpect.spawn('ssh [EMAIL PROTECTED]')
> >>> child.sendline("ls mytest.log > /dev/null 2>&1; echo $?")
> 41
> >>> child.before
> >>> print child.before
> None
> >>> print child.after
> None
before/after makes sense only after an expect. At this point there is
only a sendline; not expect. So the above None output is expected.
> >>> child.expect([pexpect.TIMEOUT, '\$ '])
> 1
1 implies it matched the second pattern. You want to use raw strings.
r'\$' or else the re sent down is a plain $ which re interprets as end
of buffer.
Most important here is your prompt doesn't end with a $ (it's
something like [EMAIL PROTECTED] ~]). Thus make it,
child.expect(r'.*]')
Try the ls command and rest of the statements.
Karthik
> >>> result=child.before
> >>> print result.split('\r\n')[1]
> [EMAIL PROTECTED] ~]
> >>> print result.split('\r\n')[0]
>
> Last login: Mon Dec 31 20:52:09 2007 from com1>>> print result.split('\r\n')
>
> ['Last login: Mon Dec 31 20:52:09 2007 from com1\r',
> '\x1b]0;[EMAIL PROTECTED]:[EMAIL PROTECTED] ~]']
--
http://mail.python.org/mailman/listinfo/python-list
Re: popen question
On Jan 8, 1:20 am, Robert Latest <[EMAIL PROTECTED]> wrote:
> Hello,
>
> look at this function:
>
> --
> def test():
> child = os.popen('./slow')
> for line in child:
> print line
> -
>
> The program "slow" just writes the numbers 0 through 9 on stdout, one line a
> second, and then quits.
>
> I would have expected the python program to spit out a numbers one by one,
> instead I see nothing for 10 seconds and then the whole output all at once.
>
> How can I get and process the pipe's output at the pace it is generated?
I've seen this problem and it took me a while to figure out what is
happening.
As other posts, I too first suspected it's a problem related to line/
full buffering on the sender side (./slow here).
It turned out that the "for line in child:" line in the iterator is
the culprit. The iterator does something like a child.readlines()
underneath (in its __iter__ call) instead of a more logical single
line read.
Change your reading to force line-by-line read
e.g.
# Force line-by-line reads: "for line in child:" reads ahead
# internally, so nothing is seen until the child finishes.
while True:   # original said "While True:" -- Python keywords are lowercase
    line = child.readline()
    if not line:   # empty string means EOF on the pipe
        break
    print(line)
Karthik
>
> Thanks,
>
> robert
--
http://mail.python.org/mailman/listinfo/python-list
Re: executing newgrp from python in current shell possible?
On Jan 12, 6:19 am, Svenn Are Bjerkem <[EMAIL PROTECTED]> wrote: > On Jan 9, 9:18 pm, Zentrader <[EMAIL PROTECTED]> wrote: > > > On Jan 9, 5:56 am, Svenn Are Bjerkem <[EMAIL PROTECTED]> > > wrote: > > > >I have been looking for a way to execute this command > > > as a part of a script, but it seems that the changes are only valid in > > > the context of the script and when the script exits, the current shell > > > still have the original "users" group setting. > > > I don't think you would want it any other way. Would you want a user > > to be able to change the group and have it remain permanently? Who's > > going to remember whether they were last in "A" or "B", and it opens > > up oportunities for the practical joker when you go to the restroom > > and leave the terminal on. Put the "change the group" code into a > > separate function in a separate file (with only you as the owner) and > > call it whenever you want to change groups. > > I am trying to create a script that make it easy for users in a design > team to create files that belong to the same group, but retain the > users uid. In order to make it visible that the user is creating files > with a different gid, the script will change the prompt to indicate > so. In a tcl solution I have now, the users home is changed to the > design area as some tools are reading and writing setup files into > $HOME. I have not found a way to change the gid in tcl so I turned to > python in hope that this scripting language could do so. > > The tcl solution spawns a new tcsh after setting several environment > variables and works quite well except for not being able to change > gid. And it is also a wish from my side to port this script to python. > > Is your suggestion to put "newgrp design" into a new file and then > exec this file in my python script? What happens to the group id of > the shell that called the python script in this case? 
I would try to > avoid spawning a new tcsh as this make execution of tools on a remote > computer difficult as the handover of command line arguments does not > seem to be handed over to the newly spawned shell. I may be > understanding something wrongly here. When you fork a process in unix/linux, the child inherits all of parents settings; *but* any future changes that is made in child process will not get reflected in the parent. So there is no way you can fire a process and some how get its setting back to the invoking shell (which could be anything btw, say bash/tcsh/ csh). Thus what you really want is start a wrapper python script, say setup_design.py In setup_design.py, call necessary os routines like setegid(egid); this will update the process settings of the process running setup_design.py. At the end of setup_design.py, do an exec call[1] to fire the user's shell (note that even if you use popen or other ways of forking a new process, things will work just fine) Whatever changes you made to the process environment will get propagated to the new shell. User must explicitly finish the new shell (say typing 'exit' on shell prompt) Karthik [1]. e.g. import os os.execvp(cmd_run[0], cmd_run) # cmd_run is probably ['/bin/bash'] > > -- > Svenn -- http://mail.python.org/mailman/listinfo/python-list
finding child cpu usage of a running child
Hi, Wondering if there is a way to measure a child process's cpu usage (sys and user) when the child is still running. I see os.times() working fine in my system (Linux 2.6.9-42.7.ELsmp), but it gives valid data only after the child has exited. When the child is alive, os.times() data for child is zero for both child-sys and child-user cpu. My script (process P1) launches child process P2 (using popen2.Popen3). P2 is a long running process (big compilation). Every minute or so, from P1, I want to measure how much cpu P2 has consumed and based on that I can make some estimate on the completion time of P2 (I have a rough idea how much total cpu P2 needs to complete). I understand it may be too expensive to update this information to the parent process when any of the child/grand-child completes; but wondering if any there is any way to get this info; the expensive operations is on-demand only when the request is made. Thanks, Karthik -- http://mail.python.org/mailman/listinfo/python-list
Re: finding child cpu usage of a running child
On Jan 25, 11:59 pm, Paddy <[EMAIL PROTECTED]> wrote: > On Jan 26, 5:43 am, Karthik Gurusamy <[EMAIL PROTECTED]> wrote: > > > > > Hi, > > > Wondering if there is a way to measure a child process's cpu usage > > (sys and user) when the child is still running. I see os.times() > > working fine in my system (Linux 2.6.9-42.7.ELsmp), but it gives valid > > data only after the child has exited. When the child is alive, > > os.times() data for child is zero for both child-sys and child-user > > cpu. > > > My script (process P1) launches child process P2 (using > > popen2.Popen3). P2 is a long running process (big compilation). Every > > minute or so, from P1, I want to measure how much cpu P2 has consumed > > and based on that I can make some estimate on the completion time of > > P2 (I have a rough idea how much total cpu P2 needs to complete). > > > I understand it may be too expensive to update this information to the > > parent process when any of the child/grand-child completes; but > > wondering if any there is any way to get this info; the expensive > > operations is on-demand only when the request is made. > > > Thanks, > > Karthik > > I had a similar requirement in December and found: > http://lilypond.org/~janneke/software/ > > proc-time.c and proc-time.py poll /proc/ files whilst command > is running to get stats. Great, thanks. From proc-time.py looks like all I want are the fields 13 to 16 of /proc//stat. And I see them updated in real time (probably the kernel does it on a periodic interrupt). Thanks, Karthik > > Enjoy, - Paddy. -- http://mail.python.org/mailman/listinfo/python-list
Re: read and readline hanging
On Jan 27, 11:08 am, Olivier Lefevre <[EMAIL PROTECTED]> wrote: > >> Indeed, if I do this interactively, I can tell after 3 lines that I've > >> gotten all there is to get right now and the fourth readline() call > >> hangs. > > > Can you really? > > Yes interactively: at the command prompt, you can tell when it's over > because you know the command you just sent and whether it requires an > answer and of which kind. Also, even if there is no answer you get a > fresh prompt when the interpreter is done. Consider pexpect module. It solves the exact problem you have. You can give a r.e. for prompt and it will take care to wait until collecting all output. It basically simulates a human typing to an interpreter. Karthik. > > > Unless there is some way to differentiate between the last line > > and all the other lines of a response, you can't really be sure. > > Yes, that has since occurred to me. I need to echo some magic string > after each command to know that I reached the end of the answer to > the previous command. In interactive mode the prompt fulfills that > role. > > > To check if there is something to read at this very moment, you > > can use any of the following methods: > > Thanks for all the suggestions! That is just what I needed. > > > - select.select() > > - the FIONREAD ioctl (the ioctl() function lives in the fcntl > > module, and the FIONREAD constant is in the termios module) > > - set the underlying file descriptor in non-blocking mode: > > flags = fcntl.fcntl(fd, fcntl.F_GETFL) > > fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NDELAY) > > After that, reads on the pipe will raise an IOError exception > > with the errorcode EWOULDBLOCK. > > That sounds like the simplest approach. > > > - start a thread that does blocking reads from the pipe, and > > puts the chunks it reads on a queue for your main thread to > > grab. > > Yes but my python threading is worse than rudimentary. I will look > into the `trheading` module suggested by the other poster. 
> > > For the last approach, you might be interested in my asyncproc > > module, which does exactly that. You can download it from > > <http://www.lysator.liu.se/~bellman/download/asyncproc.py>. > > OK, I'll look into that, too. > > Thanks again, > > -- O.L. -- http://mail.python.org/mailman/listinfo/python-list
Re: How to identify which numbers in a list are within each others' range
On Jan 31, 8:12 am, erikcw <[EMAIL PROTECTED]> wrote:
> Hi,
>
> I have a list of numbers each with a +/- margin of error. I need to
> identify which ones overlab each other.
>
> For example:
> 55 +/- 3
> 20 +/- 2
> 17 +/- 4
> 60 +/- 3
>
> #base, max, min
> list = [
> (55, 58, 52),
> (20, 22, 18),
> (17, 21, 13),
> (60, 63, 57),
> ]
>
> In this example the range of list[0] overlaps the range of list[3] AND
> list[1] overlaps list[2]
>
> What is the best way to in python to identify the list items that
> overlap and the items that don't overlap with any other.
Note you just need the left-end and right-end of each interval. Mean
is redundant information. Once you sort the interval, you can just go
from left to right, retaining only necessary information.
Below method is O(n log n) + O (nk) where k is the average overlaps
per interval.
On most average case, first term dominates and hence its O(n log n);
worst case is ofcourse O(n^2) (a simple case is all n intervals
overlapping with each other)
def strip_sort(a, b):
    """cmp-style comparator for decorated endpoints (coord, 'L'|'R', node).

    Orders by coordinate; at equal coordinates the 'L' (left) endpoint
    sorts first so that intervals touching at a single point count as
    overlapping.  (Swap the tie-break returns if point-contact must NOT
    count as an overlap.)
    """
    if a[0] < b[0]:
        return -1
    if a[0] > b[0]:
        return 1
    # Tie on coordinate: compare the 'L'/'R' tag, which lives at index 1.
    # (The original tested a[0] == 'L' -- the numeric coordinate -- so the
    # tie-break could never fire.)
    if a[1] == 'L' and b[1] == 'R':
        return -1
    if a[1] == 'R' and b[1] == 'L':
        return 1
    return 0
def overlaps(strips_given):
    """Return edges (i, j) for every pair of overlapping input intervals.

    ``strips_given`` is a sequence of (left, right) pairs.  The result is
    the edge list of a graph on the interval indices, where an edge means
    the two intervals overlap (touching at a single point counts).

    Cost is O(n log n) for the sort plus O(n*k), k being the average
    number of overlaps per interval; worst case O(n^2) when every
    interval overlaps every other.
    """
    # Decorate: each interval contributes two endpoint records of the
    # form (coordinate, 'L' or 'R', interval-index).
    strips = []
    for idx, (left, right) in enumerate(strips_given):
        strips.append((left, 'L', idx))
        strips.append((right, 'R', idx))
    # Sort endpoints by coordinate; on ties 'L' comes before 'R' so that
    # point-contact counts as an overlap.  (A key= sort replaces the
    # original cmp=strip_sort, which Python 3 no longer supports.)
    strips.sort(key=lambda e: (e[0], e[1] != 'L'))
    clique = set()  # intervals currently open -- they all overlap each other
    edges = []      # collected (i, j) overlap pairs
    for _coord, tag, node in strips:
        if tag == 'L':
            clique.add(node)
        else:  # 'R' closes the interval: it overlapped every open one
            # O(k) where k is the current clique size.
            edges.extend((node, other) for other in clique if other != node)
            clique.remove(node)
    return edges
def main():
    """Demo: report which of the sample intervals overlap each other."""
    # Intervals are (left, right) pairs: base +/- margin from the example.
    lst = [(52, 58), (18, 22), (13, 21), (57, 63)]
    # Call form works under both Python 2 and 3; the archived
    # "print overlaps(lst)" statement is Python-2 only.
    print(overlaps(lst))
Output:
[(2, 1), (0, 3)]
Karthik
>
> Thanks!
> Erik
--
http://mail.python.org/mailman/listinfo/python-list
Re: clocking subprocesses
On Mar 3, 9:57 am, [EMAIL PROTECTED] wrote: > Hi, > > I've seen several threads on this subject, but haven't (yet) run > across one that answers my specific questions. This should be really > easy for someone, so here goes: > > I'm running some numerical simulations under Ubuntu, and using Python > as my scripting language to automatically manage input and output. I > need to have a precise performance measurement (CPU time) of how long > it takes to run my simulations. > > Right now I have something like: > > stime = time.time() > subprocess.call(["./mysim","args"]) > ftime = time.time() > print ftime-stime > > However, time.time() only gives wall-clock time, so I'm also measuring > the time it takes to run other processes running at the same time. > What I'd rather have is: > > stime = time.clock() > subprocess.call(["./mysim","args"]) > ftime = time.clock() > print ftime-stime > > But this, of course, usually outputs 0, because time.clock() does not > count the CPU ticks of the subprocess. > > So, long story short, I need to get CPU time of something I call using > subprocess.call(). I don't want to have to profile my code, since it > will significantly reduce computation time. Use os.times(). It returns a 5-tuple and what you want is child cpu times. times(...) times() -> (utime, stime, cutime, cstime, elapsed_time) Return a tuple of floating point numbers indicating process times. cutime+cstime will give you the total CPU used by child (your simulation). Karthik > > Thanks for the advice. > > Kevin -- http://mail.python.org/mailman/listinfo/python-list
Re: Can read() be non-blocking?
On Nov 6, 2:54 pm, Thomas Christensen <[EMAIL PROTECTED]>
wrote:
> This issue has been raised a couple of times I am sure. But I have yet
> to find a satisfying answer.
>
> I am reading from a subprocess and this subprocess sometimes hang, in
> which case a call to read() call will block indefinite, keeping me from
> killing it.
>
> The folloing sample code illustrates the problem:
>
> proc = subprocess.Popen(['/usr/bin/foo', '/path/to/some/file'],
> stdout=subprocess.PIPE)
> output = StringIO.StringIO()
> while True:
> r = select.select([proc.stdout.fileno()], [], [], 5)[0]
> if r:
> # NOTE: This will block since it reads until EOF
> data = proc.stdout.read()
> if not data:
> break # EOF from process has been reached
> else:
> output.write(data)
> else:
> os.kill(proc.pid, signal.SIGKILL)
> proc.wait()
>
>
>
> As the NOTE: comment above suggests the call to read() will block here.
>
> I see two solutions:
>
> 1. Read one byte at a time, meaning call read(1).
> 2. Read non-blocking.
>
> I think reading one byte at a time is a waste of CPU, but I cannot find
> a way to read non-blocking.
>
> Is there a way to read non-blocking? Or maybe event a better way in
> generel to handle this situation?
>From what I understand, you want a way to abort waiting on a blocking
read if the process is hung.
There are some challenges about how you decide if the process is hung
or just busy doing work without generating output for a while (or may
be the system is busy and the process didn't get enough CPU due to
other CPU hungry processes).
Assuming you have a valid way to figure this out, one option is to
have a timeout on the read.
If the timeout exceeds, you abort the read call. No, the read doesn't
provide a timeout, you can build one using alarm.
import signal  # SIGALRM / alarm used below


def alarm_handler(*args):
    """SIGALRM handler: abort the blocking read by raising.

    NOTE(review): this signal-based timeout only works on Unix.
    """
    raise Exception("timeout")


signal.signal(signal.SIGALRM, alarm_handler)
try:
    signal.alarm(timeout)  # e.g. timeout=60 for a max wait of 1 minute
    data = proc.stdout.read()
    signal.alarm(0)        # cancel the pending alarm once the read succeeds
except Exception as e:     # py3-compatible form of "except Exception, e:"
    if str(e) != 'timeout':
        raise              # something else went wrong -- propagate it
    # got the timeout exception from the alarm: proc is hung; kill it
Karthik
>
> Thanks
>
> Thomas
--
http://mail.python.org/mailman/listinfo/python-list
Re: Subprocess module: running an interactive shell
On Mar 13, 6:39 pm, Roman Medina-Heigl Hernandez
wrote:
> Hi,
>
> I'm experimenting with Python and I need a little help with this. What I'd
> like is to launch an interactive shell, having the chance to send first
> several commands from python. I've written the following code:
>
> =
>
> #!/usr/bin/env python
>
> import sys, subprocess
>
> exe = "/bin/sh"
> params = "-i"
-i says shell to be interactive. So looks like it is directly trying
to read from the terminal.
>
> proc = subprocess.Popen([exe, params], stdin=subprocess.PIPE)
proc = subprocess.Popen([exe,], stdin=subprocess.PIPE)
works for me; but if there is an error 'sh' terminates.
If you want to simulate interactive, explore the pexpect module.
>
> proc.stdin.write("id\n")
>
> while True:
> line = sys.stdin.readline()
> if not line:
note that a simple enter terminates the shell which you may not want.
> break
> proc.stdin.write(line)
>
> sys.exit()
>
> =
>
> The problem is that when I launch it, python proggy is automatically
> suspended. The output I got is:
>
> ro...@rslabs:~/pruebas$ ./shell.py
> ro...@rslabs:~/pruebas$ uid=1000(roman) gid=1000(roman) groups=1000(roman)
> ro...@rslabs:~/pruebas$
>
> [2]+ Stopped ./shell.py
> ro...@rslabs:~/pruebas$
>
> Why and how to fix it? Would you suggest a better and more elegant way to
> do what I want?
As I see it, 'sh' is attempting to read from the keyboard and not from
stdin.
Karthik
>
> Thank you.
>
> --
>
> Saludos,
> -Roman
>
> PGP Fingerprint:
> 09BB EFCD 21ED 4E79 25FB 29E1 E47F 8A7D EAD5 6742
> [Key ID: 0xEAD56742. Available at KeyServ]
--
http://mail.python.org/mailman/listinfo/python-list
Re: Subprocess module: running an interactive shell
On Mar 14, 3:03 am, Roman Medina-Heigl Hernandez
wrote:
> Karthik Gurusamy escribió:
>
>
>
> > On Mar 13, 6:39 pm, Roman Medina-Heigl Hernandez
> > wrote:
> >> Hi,
>
> >> I'm experimenting with Python and I need a little help with this. What I'd
> >> like is to launch an interactive shell, having the chance to send first
> >> several commands from python. I've written the following code:
>
> >> =
>
> >> #!/usr/bin/env python
>
> >> import sys, subprocess
>
> >> exe = "/bin/sh"
> >> params = "-i"
>
> > -i says shell to be interactive. So looks like it is directly trying
> > to read from the terminal.
>
> Well, then the question will be: is there any way to tell python to
> directly "map" the terminal to the subprocess?
pexpect seems to be the solution for such problems :). [other
applications include ssh which asks for password from terminal (not
ssh's stdin)]
http://pexpect.sourceforge.net/pexpect.html
>
> >> proc = subprocess.Popen([exe, params], stdin=subprocess.PIPE)
>
> > proc = subprocess.Popen([exe,], stdin=subprocess.PIPE)
>
> > works for me; but if there is an error 'sh' terminates.
>
> > If you want to simulate interactive, explore the pexpect module.
>
> I'll get it a try :)))
>
> >> proc.stdin.write("id\n")
>
> >> while True:
> >> line = sys.stdin.readline()
> >> if not line:
>
> > note that a simple enter terminates the shell which you may not want.
>
> Test my code and you'll see that this is not true :) When you hit enter
> line will contain '\n' so it's not empty.
You are right. I thought readline() strips the trailing \n (It doesn't
and shouldn't as it's necessary for the case a file ends without a
newline).
>
> >> break
> >> proc.stdin.write(line)
>
> Btw, another curiosity I have: is it possible to make a print not
> automatically add \n (which is the normal case) neither " " (which happens
> when you add a "," to the print sentence)? I found an alternative not
> using print at all, eg: sys.stdout.write("K"). But it resulted strange
> to me having to do that trick :)
I am also aware of only the sys.stdout.write solution.
python3.0 has a way to do it.
>>> help(print)
Help on built-in function print in module builtins:
print(...)
print(value, ..., sep=' ', end='\n', file=sys.stdout)
Prints the values to a stream, or to sys.stdout by default.
Optional keyword arguments:
file: a file-like object (stream); defaults to the current
sys.stdout.
sep: string inserted between values, default a space.
end: string appended after the last value, default a newline.
>>> print('hello', end='')
hello>>>
Karthik
>
> Thank you for all your comments and comprenhension.
>
> -r
>
>
>
> >> sys.exit()
>
> >> =
>
> >> The problem is that when I launch it, python proggy is automatically
> >> suspended. The output I got is:
>
> >> ro...@rslabs:~/pruebas$ ./shell.py
> >> ro...@rslabs:~/pruebas$ uid=1000(roman) gid=1000(roman) groups=1000(roman)
> >> ro...@rslabs:~/pruebas$
>
> >> [2]+ Stopped ./shell.py
> >> ro...@rslabs:~/pruebas$
>
> >> Why and how to fix it? Would you suggest a better and more elegant way to
> >> do what I want?
>
> > As I see it, 'sh' is attempting to read from the keyboard and not from
> > stdin.
>
> > Karthik
>
> >> Thank you.
>
> >> --
>
> >> Saludos,
> >> -Roman
>
> >> PGP Fingerprint:
> >> 09BB EFCD 21ED 4E79 25FB 29E1 E47F 8A7D EAD5 6742
> >> [Key ID: 0xEAD56742. Available at KeyServ]
>
> > --
> >http://mail.python.org/mailman/listinfo/python-list
>
> --
>
> Saludos,
> -Roman
>
> PGP Fingerprint:
> 09BB EFCD 21ED 4E79 25FB 29E1 E47F 8A7D EAD5 6742
> [Key ID: 0xEAD56742. Available at KeyServ]
--
http://mail.python.org/mailman/listinfo/python-list
Re: ssh
On Apr 29, 6:29 pm, gert <[EMAIL PROTECTED]> wrote:
> Is this the best way to use ssh ?
> How can i use ssh keys instead of passwords ?
> I dont understand what happens when pid does not equal 0 , where does
> the cmd get executed when pid is not 0 ?
> How do you close the connection ?
>
> #http://mail.python.org/pipermail/python-list/2002-July/155390.html
> import os, time
>
> def ssh(user, rhost, pw, cmd):
> pid, fd = os.forkpty()
> if pid == 0:
> os.execv("/bin/ssh", ["/bin/ssh", "-l", user, rhost] + cmd)
> else:
> time.sleep(0.2)
> os.read(fd, 1000)
> time.sleep(0.2)
> os.write(fd, pw + "\n")
> time.sleep(0.2)
> res = ''
> s = os.read(fd, 1)
> while s:
> res += s
> s = os.read(fd, 1)
> return res
>
> print ssh('username', 'serverdomain.com', 'Password', ['ls -l'])
If ssh is being used interactively (such as being prompted for a
password), look into the pexpect module.
http://www.noah.org/wiki/Pexpect
Else try the subprocess module (or even the commands module); these are
a lot simpler to program than the more primitive os.execv/os.read/write.
If you have already setup keys, ssh should work passwordless whether
it's interactive or not (AFAIK).
Karthik
--
http://mail.python.org/mailman/listinfo/python-list
Re: help with list comprehension
On May 1, 8:01 pm, Yves Dorfsman <[EMAIL PROTECTED]> wrote:
> In the following script, m1() and m2() work fine. I am assuming m2() is
> faster although I haven't checked that (loops through the list twice instead
> of once).
>
> Now what I am trying to do is something like m3(). As currently written it
> does not work, and I have tried different ways, but I haven't managed to
> make it work.
>
> Is there a possibility ? Or is m2() the optimum ?
>
> Thanks.
>
> #!/usr/bin/python
>
> l = [ { 'colour': 'black', 'num': 0},
>{ 'colour': 'brown', 'num': 1},
>{ 'colour': 'red', 'num': 2},
>{ 'colour': 'orange', 'num': 3},
>{ 'colour': 'yellow', 'num': 4},
>{ 'colour': 'green', 'num': 5},
>{ 'colour': 'blue','num': 6},
>{ 'colour': 'violet', 'num': 7},
>{ 'colour': 'grey','num': 8},
>{ 'colour': 'white', 'num': 9}
> ]
>
> def m1():
>colours = [ e['colour'] for e in l ]
>nums= [ e['num']for e in l ]
>
> def m2():
>colours = []
>nums= []
>for e in l:
> colours.append(e['colour'])
> nums.append(e['num'])
>
> #def m3():
> # colours, nums = [ e['colour'], e['num'] for e in l ]
>
Looks like m1 is the cleanest; if you really want to run the list
comprehension only once, one possible way:
>>> p = [ (e['colour'], e['num']) for e in l ]
>>> import operator
>>> map(operator.itemgetter(0), p)
['black', 'brown', 'red', 'orange', 'yellow', 'green', 'blue',
'violet', 'grey', 'white']
>>> map(operator.itemgetter(1), p)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>>
Karthik
> --
> Yves.http://www.SollerS.ca
--
http://mail.python.org/mailman/listinfo/python-list
Seeking ideas for a cron implementation
Hi, I'm working on a cron-like functionality for my application. The outer loop runs continuously, waking every x seconds (say x=180, 300, ..). It needs to know what events in cron have expired and for each event do the work needed. It's basically like unix cron or like a calendar application with some restrictions. The outer loop may come back a lot later and many events might have missed their schedule -- but this is okay.. We don't have to worry about missed events (if there were n misses, we just need to execute the callback once). Let's take some examples [Let e denote an event] e1: hour=1 min=30 # Run every day once at 1:30 AM e2: wday=0, hour=1 min=0 # run every Monday at 1 AM e3: month=10, day=10, hour=10 min=0 # run on October 10th, 10 AM every year class Cron_Event (object): def __init__ (year=None, month=None, day=None, hour=None ..etc) # do init class Cron (object): def __init__ (): # do init def event_add (e): # add an event def execute() # see if any event has "expired" .. call its callback # I'm looking for ideas on how to manage the events here >From outer loop cron = Cron() # create various events like e1 = Cron_Event(hour=1) cron.event_add(e1) e2 = Cron_Event(wday=0, hour=1) cron.event_add(e2) while True: sleep x seconds (or wait until woken up) cron.execute() # do other work.. x may change here If I can restrict to hour and minute, it seems manageable as the interval between two occurrences is a constant. But allowing days like every Monday or 1st of every month makes things complicated. Moreover I would like each constraint in e to take on multiple possibilities (like every day at 1 AM, 2 AM and 4 AM do this). I'm looking for solutions that can leverage datetime.datetime routines. My current ideas include, for each e, tracking the next time it will fire (in seconds since epoch as given by time.time()). Once current time has passed that time, we execute the event. e.g. 
>>> datetime.datetime.now() datetime.datetime(2008, 8, 22, 13, 19, 54, 5567) >>> time.time() 1219436401.741966<--- compute event's next firing in a format like this >>> The problem seems to be how to compute that future point in time (in seconds since epoch) for a generic Cron_Event. Say how do I know the exact time in future that will satisfy a constraint like: month=11, wday=1, hour=3, min=30# At 3:30 AM on a Tuesday in November Thanks for your thoughts. Karthik -- http://mail.python.org/mailman/listinfo/python-list
Re: Storing Subprocess Results
On Sep 2, 7:16 am, topazcode <[EMAIL PROTECTED]> wrote:
> I am using the subprocess module to run some shell commands on a Linux
> system:
>
> import subprocess
> output = subprocess.call('''ssh server1 "uptime"''', shell=True)
>
> The above assigns the output variable with a return code, i.e. 0 in
> this case. How can I actually capture the data returned from
> subprocess.call, rather than just the return code? I'd like to have
> the output variable contain the uptime string in this case.
Probably commands module is a better choice for your problem:
>>> import commands
>>> commands.getoutput('fortune')
"While money can't buy happiness, it certainly lets you choose your own
\nform of misery."
>>>
Karthik
Any help
> is appreciated. Thanks.
--
http://mail.python.org/mailman/listinfo/python-list
Re: Seeking ideas for a cron implementation
On Aug 22, 1:51 pm, Sean DiZazzo <[EMAIL PROTECTED]> wrote: > On Aug 22, 1:30 pm, Karthik Gurusamy <[EMAIL PROTECTED]> wrote: > > > > > Hi, > > > I'm working on acronlike functionality for my application. > > The outer loops runs continuously waking every x seconds (say x=180, > > 300, ..). > > It needs to know what events incronhas expired and for each event do > > the work needed. > > > It's basically like unixcronor like a calendar application with some > > restrictions. The outer loop may come back a lot later and many events > > might have missed their schedule -- but this is okay.. We don't have > > to worry about missed events (if there were n misses, we just need to > > execute call back once). > > > Let's take some examples [Let e denotes an event] > > e1: hour=1 min=30 # Run every day once at > > 1:30 AM > > e2: wday=0, hour=1 min=0 # run every Monday at 1 AM > > e3: month=10, day=10, hour=10 min=0 # run on October 10th, 10 AM > > every year > > > class Cron_Event (object): > > def __init__ (year=None, month=None, day=None, hour=None ..etc) > > # do init > > > classCron(object): > > def __init__ (): > > # do init > > def event_add (e): > > # add an event > > def execute() > > # see if any events has "expired" .. call it's callback > > # I'm looking for ideas on how to manage the events here > > > From outer loop > >cron=Cron() > > # create various events like > > e1 = Cron_Event(hour=1) > >cron.event_add(e1) > > e2 = Cron_Event(wday=0, hour=1) > >cron.event_add(e2) > > > while True: > > sleep x seconds (or wait until woken up) > > cron.execute() > > # do other work.. x may change here > > > If I can restrict to hour and minute, it seems manageable as the > > interval between two occurrences is a constant. But allowing days like > > every Monday or 1st of every month makes things complicated. Moreover > > I would like each constraint in e to take on multiple possibilities > > (like every day at 1AM, 2 AM and 4 AM do this). 
> > > I'm looking for solutions that can leverage datetime.datetime > > routines. > > My current ideas include for each e, track the next time it will fire > > (in seconds since epoch as given by time.time()). Once current time > > has passed that time, we execute the event. e.g.>>> datetime.datetime.now() > > > datetime.datetime(2008, 8, 22, 13, 19, 54, 5567)>>> time.time() > > > 1219436401.741966 <--- compute event's next firing in a format like > > this > > > The problem seems to be how to compute that future point in time (in > > seconds since epoch) for a generic Cron_Event. > > > Say how do I know the exact time in future that will satisfy a > > constraint like: > > month=11, wday=1, hour=3, min=30 # At 3:30 AM on a Tuesday in > > November > > > Thanks for your thoughts. > > > Karthik > > I only scanned your message, but maybe datetime.timedelta() will > help.. > > >>> import datetime > >>> now = datetime.datetime.now() > >>> print now > > 2008-08-22 13:48:49.335225>>> day = datetime.timedelta(1) > >>> print day > 1 day, 0:00:00 > >>> print now + day > > 2008-08-23 13:48:49.335225 Thanks, I found using a more efficient algorithm tricky and seemed error prone. [I do welcome ideas still if anyone has a cool solution] I used your idea and took the easy way out by using a brute-force search. Here is an outline if anyone faces similar problem: hours, minutes are lists: say for every day at 1:30 pm and 2:45 pm, hours=[13, 14] and minutes=[30, 45,]. I restricted myself to minutes and hours (and every day) to simplify the problem. 
def set_expiry_time_check_in_a_day (self, now, target, hours, mins, flags=set()): """ A small utility routine to simulate 'goto' Looks like now could be computed inside this function -- the small drift due to time taken in this function should be negligible """ # let's see if in today we can find an expiry # we do brute force search starting with the smallest hour for hour in hours: for min in mins: target = target.replace(hour=hour, minute=min, second=0, microsecond=0) if 'is_debug_1' in flags
Newbie Question:Please help
Hi, I am a newbie to python and I hope this is not a stupid question. I am trying to run a main method from a Python command line using the command shell using the command. python main_test.py I get the following error. File "", line 1 python main_test.py Syntax Error: invalid syntax My main file main_test.py is given below. #!/usr/bin/env python """ Test method to run the main method. """ def main(): print "Main method called."; if __name__ = "__main__": main() -- http://mail.python.org/mailman/listinfo/python-list
Re: multiple processes, private working directories
On Sep 24, 6:27 pm, Tim Arnold <[EMAIL PROTECTED]> wrote: > I have a bunch of processes to run and each one needs its own working > directory. I'd also like to know when all of the processes are > finished. > > (1) First thought was threads, until I saw that os.chdir was process- > global. > (2) Next thought was fork, but I don't know how to signal when each > child is > finished. > (3) Current thought is to break the process from a method into a > external > script; call the script in separate threads. This is the only way I > can see > to give each process a separate dir (external process fixes that), and > I can > find out when each process is finished (thread fixes that). > > Am I missing something? Is there a better way? I hate to rewrite this > method > as a script since I've got a lot of object metadata that I'll have to > regenerate with each call of the script. Use subprocess; it supports a cwd argument to provide the given directory as the child's working directory. Help on class Popen in module subprocess: class Popen(__builtin__.object) | Methods defined here: | | __del__(self) | | __init__(self, args, bufsize=0, executable=None, stdin=None, stdout=None, st derr=None, preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None, un iversal_newlines=False, startupinfo=None, creationflags=0) | Create new Popen instance. You want to provide the cwd argument above. Then once you have launched all your n processes, run through a loop waiting for each one to finish. # cmds is a list of dicts providing details on what processes to run.. what its cwd should be runs = [] for c in cmds: run = subprocess.Popen(c['cmd'], cwd = c['cwd'] . etc other args) runs.append(run) # Now wait for all the processes to finish for run in runs: run.wait() Note that if any of the processes generate a lot of stdout/stderr, you will get a deadlock in the above loop. Then you may want to go for threads or use run.poll and do the reading of the output from your child processes. 
Karthik > > thanks for any suggestions, > --Tim Arnold -- http://mail.python.org/mailman/listinfo/python-list
